From 4371116118666b9e63c440bc412f1ca4840f4d44 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 18 Dec 2024 06:03:57 +0100 Subject: [PATCH 01/60] [core] Core read_model supports user configuration (#28066) ### Details: - Extend `Core::read_model` by additional optimal parameter, properties. The properties passed to `read_model` are valid only for this usage and will not change `Core` instance state. - Update python API to support new parameter. ### Tickets: - CVS-158761 --------- Signed-off-by: Raasz, Pawel --- src/bindings/python/src/openvino/_ov_api.py | 17 +++- .../python/src/pyopenvino/core/core.cpp | 28 ++++-- .../python/tests/test_runtime/test_core.py | 36 ++++++++ src/frontends/ir/tests/frontend_test_mmap.cpp | 36 ++++++++ .../dev_api/openvino/runtime/icore.hpp | 5 +- .../include/openvino/runtime/core.hpp | 52 +++++++++-- src/inference/src/cpp/core.cpp | 15 ++- src/inference/src/dev/core_impl.cpp | 40 ++++---- src/inference/src/dev/core_impl.hpp | 8 +- src/inference/src/dev/iplugin.cpp | 7 +- .../tests/functional/caching_test.cpp | 91 ++++++++++++++++++- .../mocks/openvino/runtime/mock_icore.hpp | 5 +- 12 files changed, 297 insertions(+), 43 deletions(-) diff --git a/src/bindings/python/src/openvino/_ov_api.py b/src/bindings/python/src/openvino/_ov_api.py index 08f6873d1b8f0f..1631bc42051418 100644 --- a/src/bindings/python/src/openvino/_ov_api.py +++ b/src/bindings/python/src/openvino/_ov_api.py @@ -495,11 +495,22 @@ class Core(CoreBase): between several Core instances. The recommended way is to have a single Core instance per application. 
""" - def read_model(self, model: Union[str, bytes, object], weights: Union[object, str, bytes, Tensor] = None) -> Model: - if weights is not None: + def read_model( + self, + model: Union[str, bytes, object], + weights: Union[object, str, bytes, Tensor] = None, + config: Optional[dict] = None + ) -> Model: + config = {} if config is None else config + + if isinstance(weights, Tensor): return Model(super().read_model(model, weights)) + elif isinstance(model, bytes): + return Model(super().read_model(model, bytes() if weights is None else weights)) + elif weights is None: + return Model(super().read_model(model, config=config)) else: - return Model(super().read_model(model)) + return Model(super().read_model(model, weights, config)) def compile_model( self, diff --git a/src/bindings/python/src/pyopenvino/core/core.cpp b/src/bindings/python/src/pyopenvino/core/core.cpp index 68e3e5cc4841ed..6dae6508d630f3 100644 --- a/src/bindings/python/src/pyopenvino/core/core.cpp +++ b/src/bindings/python/src/pyopenvino/core/core.cpp @@ -393,10 +393,17 @@ void regclass_Core(py::module m) { cls.def( "read_model", - (std::shared_ptr(ov::Core::*)(const std::string&, const std::string&) const) & ov::Core::read_model, - py::call_guard(), + [](ov::Core& self, + const std::string& model_path, + const std::string& weight_path, + const std::map& config) { + const auto any_map = Common::utils::properties_to_any_map(config); + py::gil_scoped_release release; + return self.read_model(model_path, weight_path, any_map); + }, py::arg("model"), py::arg("weights") = "", + py::arg("config") = py::dict(), R"( Reads models from IR / ONNX / PDPD / TF and TFLite formats. @@ -412,6 +419,8 @@ void regclass_Core(py::module m) { For TF format (*.pb) weights parameter is not used. For TFLite format (*.tflite) weights parameter is not used. :type weights: str + :param config: Optional map of pairs: (property name, property value) relevant only for this read operation. 
+ :type config: dict, optional :return: A model. :rtype: openvino.runtime.Model )"); @@ -438,7 +447,10 @@ void regclass_Core(py::module m) { cls.def( "read_model", - [](ov::Core& self, py::object model_path, py::object weights_path) { + [](ov::Core& self, + py::object model_path, + py::object weights_path, + const std::map& config) { if (py::isinstance(model_path, pybind11::module::import("io").attr("BytesIO"))) { std::stringstream _stream; model_path.attr("seek")(0); // Always rewind stream! @@ -466,8 +478,9 @@ void regclass_Core(py::module m) { if (!py::isinstance(weights_path)) { weights_path_cpp = py::str(weights_path); } + const auto any_map = Common::utils::properties_to_any_map(config); py::gil_scoped_release release; - return self.read_model(model_path_cpp, weights_path_cpp); + return self.read_model(model_path_cpp, weights_path_cpp, any_map); } std::stringstream str; @@ -477,6 +490,7 @@ void regclass_Core(py::module m) { }, py::arg("model"), py::arg("weights") = py::none(), + py::arg("config") = py::dict(), R"( Reads models from IR / ONNX / PDPD / TF and TFLite formats. @@ -492,6 +506,8 @@ void regclass_Core(py::module m) { For TF format (*.pb): weights parameter is not used. For TFLite format (*.tflite) weights parameter is not used. :type weights: pathlib.Path + :param config: Optional map of pairs: (property name, property value) relevant only for this read operation. + :type config: dict, optional :return: A model. :rtype: openvino.runtime.Model )"); @@ -653,7 +669,7 @@ void regclass_Core(py::module m) { :param properties: Optional dict of pairs: (property name, property value) :type properties: dict :return: Pairs a operation name -> a device name supporting this operation. - :rtype: dict + :rtype: dict )"); cls.def("add_extension", @@ -671,7 +687,7 @@ void regclass_Core(py::module m) { py::arg("extension"), R"( Registers an extension to a Core object. - + :param extension: Extension object. 
:type extension: openvino.runtime.Extension )"); diff --git a/src/bindings/python/tests/test_runtime/test_core.py b/src/bindings/python/tests/test_runtime/test_core.py index d147ce2d6bcab2..b9cf5735f01e4c 100644 --- a/src/bindings/python/tests/test_runtime/test_core.py +++ b/src/bindings/python/tests/test_runtime/test_core.py @@ -140,6 +140,24 @@ def test_read_model_from_ir(request, tmp_path): assert isinstance(model, Model) +# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request +def test_read_model_from_ir_with_user_config(request, tmp_path): + core = Core() + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) + relu_model = get_relu_model() + serialize(relu_model, xml_path, bin_path) + + core_cache_dir = core.get_property("CACHE_DIR") + cache_path = tmp_path / Path("cache") + + model = core.read_model(xml_path, bin_path, config={"CACHE_DIR": f"{cache_path}"}) + + assert isinstance(model, Model) + assert core_cache_dir == core.get_property("CACHE_DIR") + assert os.path.exists(cache_path) + os.rmdir(cache_path) + + # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_read_model_from_tensor(request, tmp_path): core = Core() @@ -178,6 +196,24 @@ def test_read_model_as_path(request, tmp_path): assert isinstance(model, Model) +# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request +def test_read_model_as_path_with_user_config(request, tmp_path): + core = Core() + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) + relu_model = get_relu_model() + serialize(relu_model, xml_path, bin_path) + + core_cache_dir = core.get_property("CACHE_DIR") + cache_path = tmp_path / Path("cache_as_path") + + model = core.read_model(Path(xml_path), Path(bin_path), config={"CACHE_DIR": f"{cache_path}"}) + + assert isinstance(model, Model) + assert core_cache_dir == core.get_property("CACHE_DIR") + assert os.path.exists(cache_path) + os.rmdir(cache_path) + + # 
request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_read_model_from_buffer(request, tmp_path): core = Core() diff --git a/src/frontends/ir/tests/frontend_test_mmap.cpp b/src/frontends/ir/tests/frontend_test_mmap.cpp index 6b9ede14fa7d55..a58e3e29ff0a75 100644 --- a/src/frontends/ir/tests/frontend_test_mmap.cpp +++ b/src/frontends/ir/tests/frontend_test_mmap.cpp @@ -52,6 +52,42 @@ TEST_F(IRFrontendMMapTestsAdvanced, core_enable_mmap_property) { auto model = core.read_model(xmlFileName); auto rss_read = ov::test::utils::getVmRSSInKB(); + if (is_mmap != core.get_property("", ov::enable_mmap)) { + std::cout << "Test failed: core property is not set correctly" << std::endl; + exit(1); + } + + bool is_weights_read = (rss_read - rss_init) > REF_RSS; + if (is_mmap == is_weights_read) { + std::cerr << "Test failed: mmap is " << (is_mmap ? "enabled" : "disabled") << ", but weights are " + << (is_weights_read ? "read" : "not read") << " in RAM" << std::endl; + exit(1); + } + std::cerr << "Test passed" << std::endl; + exit(0); + }; + + for (const auto is_mmap : {true, false}) + // Run test in a separate process to not affect RAM values by previous tests + EXPECT_EXIT(test(is_mmap), ::testing::ExitedWithCode(0), "Test passed"); +} + +TEST_F(IRFrontendMMapTestsAdvanced, core_enable_mmap_property_user_config) { + // Test checks that with enabled `mmap` .bin file + // isn't read into RAM on `read_model` stage. 
+ // Otherwise, with disabled `mmap` .bin file should + // be in RAM + + auto test = [&](const bool& is_mmap) { + auto rss_init = ov::test::utils::getVmRSSInKB(); + auto model = core.read_model(xmlFileName, {}, {{ov::enable_mmap(is_mmap)}}); + auto rss_read = ov::test::utils::getVmRSSInKB(); + + if (true != core.get_property("", ov::enable_mmap)) { + std::cout << "Test failed: core property changed by user configuration" << std::endl; + exit(1); + } + bool is_weights_read = (rss_read - rss_init) > REF_RSS; if (is_mmap == is_weights_read) { std::cerr << "Test failed: mmap is " << (is_mmap ? "enabled" : "disabled") << ", but weights are " diff --git a/src/inference/dev_api/openvino/runtime/icore.hpp b/src/inference/dev_api/openvino/runtime/icore.hpp index 659b9c5c0f5788..cc2c94e724ab41 100644 --- a/src/inference/dev_api/openvino/runtime/icore.hpp +++ b/src/inference/dev_api/openvino/runtime/icore.hpp @@ -60,9 +60,12 @@ class OPENVINO_RUNTIME_API ICore { * @param model_path path to IR file * @param bin_path path to bin file, if path is empty, will try to read bin file with the same name as xml and * if bin file with the same name was not found, will load IR without weights. + * @param properties Optional map of pairs: (property name, property value) relevant only for this read operation. 
* @return shared pointer to ov::Model */ - virtual std::shared_ptr read_model(const std::string& model_path, const std::string& bin_path) const = 0; + virtual std::shared_ptr read_model(const std::string& model_path, + const std::string& bin_path, + const AnyMap& properties) const = 0; virtual ov::AnyMap create_compile_config(const std::string& device_name, const ov::AnyMap& origConfig) const = 0; diff --git a/src/inference/include/openvino/runtime/core.hpp b/src/inference/include/openvino/runtime/core.hpp index c13432d664e736..2ca6dc83bcf726 100644 --- a/src/inference/include/openvino/runtime/core.hpp +++ b/src/inference/include/openvino/runtime/core.hpp @@ -79,11 +79,14 @@ class OPENVINO_RUNTIME_API Core { * For the following file formats the `bin_path` parameter is not used: * * ONNX format (*.onnx) * * PDPD (*.pdmodel) - * * TF (*.pb) + * * TF (*.pb, *.meta, SavedModel directory) * * TFLite (*.tflite) + * @param properties Optional map of pairs: (property name, property value) relevant only for this read operation. * @return A model. */ - std::shared_ptr read_model(const std::wstring& model_path, const std::wstring& bin_path = {}) const; + std::shared_ptr read_model(const std::wstring& model_path, + const std::wstring& bin_path = {}, + const ov::AnyMap& properties = {}) const; #endif /** @@ -96,17 +99,54 @@ class OPENVINO_RUNTIME_API Core { * For the following file formats the `bin_path` parameter is not used: * * ONNX format (*.onnx) * * PDPD (*.pdmodel) - * * TF (*.pb) + * * TF (*.pb, *.meta, SavedModel directory) * * TFLite (*.tflite) + * @param properties Optional map of pairs: (property name, property value) relevant only for this read operation. * @return A model. 
* @{ */ - std::shared_ptr read_model(const std::string& model_path, const std::string& bin_path = {}) const; + std::shared_ptr read_model(const std::string& model_path, + const std::string& bin_path = {}, + const ov::AnyMap& properties = {}) const; #ifdef OPENVINO_CPP_VER_17 template >* = nullptr> - std::shared_ptr read_model(const Path& model_path, const Path& bin_path = {}) const { - return read_model(model_path.string(), bin_path.string()); + auto read_model(const Path& model_path, const Path& bin_path = {}, const ov::AnyMap& properties = {}) const { + return read_model(model_path.string(), bin_path.string(), properties); + } +#endif + /// @} + + /** + * @brief Reads models from IR / ONNX / PDPD / TF / TFLite file formats. + * + * @param model_path Path to a model. + * @param bin_path Path to a data file. + * For IR format (*.bin): + * * if `bin_path` is empty, will try to read a bin file with the same name as xml and + * * if the bin file with the same name is not found, will load IR without weights. + * For the following file formats the `bin_path` parameter is not used: + * * ONNX format (*.onnx) + * * PDPD (*.pdmodel) + * * TF (*.pb, *.meta, SavedModel directory) + * * TFLite (*.tflite) + * @param properties Optional pack of pairs: (property name, property value) relevant only for this read operation. + * @return A model. + * @{ + */ + template + util::EnableIfAllStringAny read_model(const std::string& model_path, + const std::string& bin_path, + Properties&&... properties) const { + return read_model(model_path, bin_path, AnyMap{std::forward(properties)...}); + } + +#ifdef OPENVINO_CPP_VER_17 + template && (sizeof...(Properties) > 0)>* = nullptr> + auto read_model(const Path& model_path, const Path& bin_path, Properties&&... 
properties) const { + return read_model(model_path.string(), bin_path.string(), std::forward(properties)...); } #endif /// @} diff --git a/src/inference/src/cpp/core.cpp b/src/inference/src/cpp/core.cpp index 2d6c204757bcf6..5d85fe81364a17 100644 --- a/src/inference/src/cpp/core.cpp +++ b/src/inference/src/cpp/core.cpp @@ -80,14 +80,19 @@ Core::Core(const std::string& xml_config_file) { std::map Core::get_versions(const std::string& device_name) const { OV_CORE_CALL_STATEMENT({ return _impl->get_versions(device_name); })} #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -std::shared_ptr Core::read_model(const std::wstring& model_path, const std::wstring& bin_path) const { - OV_CORE_CALL_STATEMENT( - return _impl->read_model(ov::util::wstring_to_string(model_path), ov::util::wstring_to_string(bin_path));); +std::shared_ptr Core::read_model(const std::wstring& model_path, + const std::wstring& bin_path, + const ov::AnyMap& properties) const { + OV_CORE_CALL_STATEMENT(return _impl->read_model(ov::util::wstring_to_string(model_path), + ov::util::wstring_to_string(bin_path), + properties);); } #endif -std::shared_ptr Core::read_model(const std::string& model_path, const std::string& bin_path) const { - OV_CORE_CALL_STATEMENT(return _impl->read_model(model_path, bin_path);); +std::shared_ptr Core::read_model(const std::string& model_path, + const std::string& bin_path, + const AnyMap& properties) const { + OV_CORE_CALL_STATEMENT(return _impl->read_model(model_path, bin_path, properties);); } std::shared_ptr Core::read_model(const std::string& model, const ov::Tensor& weights) const { diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index f332c7c999a548..e0e2fb109dc642 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -223,12 +223,6 @@ static const auto core_properties_names = static const auto auto_batch_properties_names = ov::util::make_array(ov::auto_batch_timeout.name(), 
ov::hint::allow_auto_batching.name()); - -void remove_core_properties(ov::AnyMap& properties) { - for (const auto& name : core_properties_names) { - properties.erase(name); - } -} } // namespace bool ov::is_config_applicable(const std::string& user_device_name, const std::string& subprop_device_name) { @@ -352,10 +346,8 @@ ov::Parsed ov::parseDeviceNameIntoConfig(const std::string& deviceName, // remove core properties for HW devices if (!is_virtual_device(parsed._deviceName)) { - for (const auto& name : {ov::enable_mmap.name(), ov::force_tbb_terminate.name()}) { - // note: ov::cache_dir kept as plugin may require it - parsed._config.erase(name); - } + // note: ov::cache_dir kept as plugin may require it + CoreConfig::remove_core_skip_cache_dir(parsed._config); } return parsed; } @@ -842,7 +834,7 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::string& mod const std::string& device_name, const ov::AnyMap& config) const { OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::LoadTime, "Core::compile_model::Path"); - auto parsed = parseDeviceNameIntoConfig(device_name, coreConfig, config); + auto parsed = parse_device_config(device_name, coreConfig, config, false); // in case of compile_model(file_name), we need to clear-up core-level properties auto plugin = get_plugin(parsed._deviceName); ov::SoPtr compiled_model; @@ -851,13 +843,13 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::string& mod if (cacheManager && device_supports_model_caching(plugin) && !is_proxy_device(plugin)) { // Skip caching for proxy plugin. 
HW plugin will load network from the cache + CoreConfig::remove_core_skip_cache_dir(parsed._config); CacheContent cacheContent{cacheManager, parsed._core_config.get_enable_mmap(), model_path}; cacheContent.blobId = ov::ModelCache::compute_hash(model_path, create_compile_config(plugin, parsed._config)); std::unique_ptr lock = cacheGuard.get_hash_lock(cacheContent.blobId); compiled_model = load_model_from_cache(cacheContent, plugin, parsed._config, ov::SoPtr{}, [&]() { - auto model = - ov::util::read_model(model_path, std::string{}, extensions, parsed._core_config.get_enable_mmap()); + const auto model = util::read_model(model_path, "", extensions, parsed._core_config.get_enable_mmap()); return compile_model_and_cache(plugin, model, parsed._config, {}, cacheContent); }); } else { @@ -1593,7 +1585,19 @@ void ov::CoreConfig::set(const ov::AnyMap& config) { void ov::CoreConfig::set_and_update(ov::AnyMap& config) { set(config); - remove_core_properties(config); + remove_core(config); +} + +void ov::CoreConfig::remove_core(ov::AnyMap& config) { + for (const auto& name : core_properties_names) { + config.erase(name); + } +} + +void ov::CoreConfig::remove_core_skip_cache_dir(ov::AnyMap& config) { + for (const auto& name : {ov::enable_mmap.name(), ov::force_tbb_terminate.name()}) { + config.erase(name); + } } void ov::CoreConfig::set_cache_dir_for_device(const std::string& dir, const std::string& name) { @@ -1664,9 +1668,13 @@ void ov::CoreImpl::add_mutex(const std::string& dev_name) { dev_mutexes[dev_name]; } -std::shared_ptr ov::CoreImpl::read_model(const std::string& modelPath, const std::string& binPath) const { +std::shared_ptr ov::CoreImpl::read_model(const std::string& modelPath, + const std::string& binPath, + const AnyMap& properties) const { OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::ReadTime, "CoreImpl::read_model from file"); - return ov::util::read_model(modelPath, binPath, extensions, coreConfig.get_enable_mmap()); + auto local_core_config = coreConfig; + 
local_core_config.set(properties); + return ov::util::read_model(modelPath, binPath, extensions, local_core_config.get_enable_mmap()); } std::shared_ptr ov::CoreImpl::read_model(const std::string& model, diff --git a/src/inference/src/dev/core_impl.hpp b/src/inference/src/dev/core_impl.hpp index 7bbab14e4d8c14..85417175c22556 100644 --- a/src/inference/src/dev/core_impl.hpp +++ b/src/inference/src/dev/core_impl.hpp @@ -55,6 +55,10 @@ class CoreConfig final { // Creating thread-safe copy of global config including shared_ptr to ICacheManager CacheConfig get_cache_config_for_device(const ov::Plugin& plugin) const; + // remove core properties + static void remove_core(ov::AnyMap& config); + static void remove_core_skip_cache_dir(ov::AnyMap& config); + private: mutable std::mutex _cacheConfigMutex; CacheConfig _cacheConfig; @@ -303,7 +307,9 @@ class CoreImpl : public ov::ICore, public std::enable_shared_from_this read_model(const std::shared_ptr& model, const std::shared_ptr& weights) const override; - std::shared_ptr read_model(const std::string& model_path, const std::string& bin_path) const override; + std::shared_ptr read_model(const std::string& model_path, + const std::string& bin_path, + const AnyMap& properties) const override; ov::SoPtr compile_model(const std::shared_ptr& model, const std::string& device_name, diff --git a/src/inference/src/dev/iplugin.cpp b/src/inference/src/dev/iplugin.cpp index 1049e39bee6f49..f8c49825ba435a 100644 --- a/src/inference/src/dev/iplugin.cpp +++ b/src/inference/src/dev/iplugin.cpp @@ -4,6 +4,7 @@ #include "openvino/runtime/iplugin.hpp" +#include "core_impl.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/op/util/shape_of_base.hpp" @@ -75,8 +76,10 @@ std::shared_ptr ov::IPlugin::compile_model(const std::string const ov::AnyMap& properties) const { auto core = get_core(); OPENVINO_ASSERT(core); - auto model = core->read_model(model_path, std::string()); - return 
compile_model(model, properties); + const auto model = core->read_model(model_path, {}, properties); + auto local_properties = properties; + CoreConfig::remove_core_skip_cache_dir(local_properties); + return compile_model(model, local_properties); } std::unordered_set ov::get_supported_nodes( diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index 6b1c7f938ae731..e3572dc98915b0 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -276,14 +276,14 @@ class CachingTest : public ::testing::TestWithParam model_buffer; + if (config.count(ov::internal::cached_model_buffer.name())) + model_buffer = config.at(ov::internal::cached_model_buffer.name()).as>(); + EXPECT_FALSE(model_buffer); + + std::string name; + istr >> name; + char space; + istr.read(&space, 1); + std::lock_guard lock(mock_creation_mutex); + return create_mock_compiled_model(m_models[name], mockPlugin); + })); + ON_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { + return std::vector{ov::internal::caching_properties.name(), + ov::internal::caching_with_mmap.name()}; + })); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + if (m_remoteContext) { + return; // skip the remote Context test for Multi plugin + } + int index = 0; + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); + }); + MkDirGuard guard(m_cacheDir); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + 
EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(1); + testLoad([&](ov::Core& core) { + const auto config = ov::AnyMap{{ov::cache_dir(m_cacheDir)}, {ov::enable_mmap(false)}}; + m_testFunctionWithCfg(core, config); + m_testFunctionWithCfg(core, config); + }); + std::cout << "Caching Load multiple threads test completed. Tried " << index << " times" << std::endl; +} + +TEST_P(CachingTest, Load_mmap_is_not_supported_by_plugin_local_cfg) { + ON_CALL(*mockPlugin, import_model(_, _)).WillByDefault(Invoke([&](std::istream& istr, const ov::AnyMap& config) { + if (m_checkConfigCb) { + m_checkConfigCb(config); + } + std::shared_ptr model_buffer; + if (config.count(ov::internal::cached_model_buffer.name())) + model_buffer = config.at(ov::internal::cached_model_buffer.name()).as>(); + EXPECT_FALSE(model_buffer); + + std::string name; + istr >> name; + char space; + istr.read(&space, 1); + std::lock_guard lock(mock_creation_mutex); + return create_mock_compiled_model(m_models[name], mockPlugin); + })); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + if (m_remoteContext) { + return; // skip the remote Context test for Multi plugin + } + int index = 0; + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); + }); + MkDirGuard guard(m_cacheDir); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(1); + testLoad([&](ov::Core& core) { + const auto config = 
ov::AnyMap{{ov::cache_dir(m_cacheDir)}, {ov::enable_mmap(false)}}; + m_testFunctionWithCfg(core, config); + m_testFunctionWithCfg(core, config); + }); + std::cout << "Caching Load multiple threads test completed. Tried " << index << " times" << std::endl; +} + #if defined(ENABLE_OV_IR_FRONTEND) static std::string getTestCaseName(const testing::TestParamInfo>& obj) { diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp index 367818ebbf9572..534ba6cd1748df 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp @@ -47,7 +47,10 @@ class MockICore : public ov::ICore { (const std::string&, const ov::Tensor&, const std::string&, const ov::AnyMap&), (const)); MOCK_METHOD(std::shared_ptr, read_model, (const std::string&, const ov::Tensor&, bool), (const)); - MOCK_METHOD(std::shared_ptr, read_model, (const std::string&, const std::string&), (const)); + MOCK_METHOD(std::shared_ptr, + read_model, + (const std::string&, const std::string&, const ov::AnyMap&), + (const)); MOCK_METHOD(std::shared_ptr, read_model, (const std::shared_ptr&, const std::shared_ptr&), From 691385e7dde18cf68515166f9e259c36acd3a048 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Wed, 18 Dec 2024 06:06:10 +0100 Subject: [PATCH 02/60] [RTTI] Add OPENVINO_MATCHER_PASS_RTTI definition (#28113) ### Details: - Adds RTTI definition for passes derived from `ov::pass::MatcherPass` - Applies the macro to: - common/snippets - Offline Transformations - LP Transformations - Frontends - Core ### Tickets: - CVS-159217 Co-authored-by: Cathy Bao Co-authored-by: Venkat Raghavulu Co-authored-by: Ivan Tikhonov --------- Signed-off-by: Tomasz Jankowski --- .../low_precision/base_matcher_pass.hpp | 1 + .../convert_subtract_constant.hpp | 2 +- .../create_precisions_dependent_attribute.hpp | 1 + 
.../low_precision/layer_transformation.hpp | 1 + .../include/low_precision/markup_bias.hpp | 4 ++-- .../propagate_through_precision_preserved.hpp | 4 +++- .../low_precision/propagate_to_input.hpp | 3 ++- .../pull_reshape_through_dequantization.hpp | 2 +- .../pull_transpose_through_dequantization.hpp | 2 +- .../update_shared_precision_preserved.hpp | 4 +++- .../include/compress_quantize_weights.hpp | 4 ++-- .../include/pruning.hpp | 2 +- .../src/pruning/init_masks.cpp | 2 ++ .../src/pruning/propagate_masks.cpp | 15 ++++++++++++ .../pass/broadcast_to_movebroadcast.hpp | 3 ++- .../snippets/pass/collapse_subgraph.hpp | 2 +- .../snippets/pass/common_optimizations.hpp | 2 +- .../snippets/pass/convert_constants.hpp | 1 + .../pass/convert_power_to_powerstatic.hpp | 3 ++- .../pass/explicit_transpose_matmul_inputs.hpp | 2 +- .../pass/extract_reshapes_from_mha.hpp | 2 +- .../include/snippets/pass/fc_tokenization.hpp | 2 +- .../snippets/pass/fq_decomposition.hpp | 1 + .../snippets/pass/fuse_transpose_brgemm.hpp | 4 ++-- .../snippets/pass/gn_decomposition.hpp | 4 ++-- .../include/snippets/pass/gn_tokenization.hpp | 4 ++-- .../snippets/pass/insert_movebroadcast.hpp | 1 + .../snippets/pass/matmul_to_brgemm.hpp | 2 +- .../snippets/pass/mha_tokenization.hpp | 2 +- .../pass/reduce_to_snippets_reduce.hpp | 3 ++- .../snippets/pass/softmax_decomposition.hpp | 2 +- .../pass/softmax_reshape_elimination.hpp | 1 + .../snippets/pass/transform_convert.hpp | 1 + .../snippets/pass/transpose_decomposition.hpp | 2 +- src/core/include/openvino/core/rtti.hpp | 15 ++++++------ src/core/include/openvino/op/op.hpp | 24 +++++++++---------- .../include/openvino/pass/matcher_pass.hpp | 14 ++++++++++- src/core/tests/graph_rewrite.cpp | 8 ++++--- src/core/tests/matcher_pass.cpp | 1 + src/core/tests/pass_config.cpp | 6 ++--- .../src/extension/decoder_transformation.cpp | 1 + .../internal/pass/transform_fakequantize.hpp | 4 ++-- .../paddle/src/internal/pass/transform_if.hpp | 2 +- 
.../internal/pass/transform_tensorarray.hpp | 2 +- .../src/internal/pass/transform_while.hpp | 2 +- .../append_list_unpack_replacer.hpp | 2 +- .../src/transforms/aten_cat_replacer.hpp | 4 ++-- .../src/transforms/aten_getitem_replacer.hpp | 2 +- .../transforms/aten_index_put_replacer.hpp | 2 +- .../src/transforms/aten_index_replacer.hpp | 2 +- .../aten_stack_list_construct_replacer.hpp | 2 +- .../src/transforms/einsum_list_construct.hpp | 4 ++-- .../index_loop_getitem_replacer.hpp | 2 +- .../transforms/irfftn_complex_replacer.hpp | 2 +- .../src/transforms/listconstruct_replacer.hpp | 2 +- .../min_max_prim_list_construct_replacer.hpp | 2 +- .../transforms/prim_list_construct_pad.hpp | 4 ++-- .../transforms/prim_list_unpack_replacer.hpp | 4 ++-- .../src/transforms/quantized_node_remover.hpp | 2 +- .../src/transforms/remove_packing_ops.hpp | 4 ++-- .../src/transforms/reverseprop_resolver.hpp | 2 +- .../src/transforms/rfftn_complex_replacer.hpp | 2 +- .../transforms/string_equality_replacer.hpp | 2 +- .../torchfx_gptq_pattern_replacer.hpp | 4 ++-- .../src/transforms/tuple_unpack_replacer.hpp | 4 ++-- .../src/transforms/u4_block_repack.hpp | 4 ++-- .../uninitialized_variable_resolve.hpp | 2 +- .../embedding_segments_feature_fusing.hpp | 2 +- .../tensor_array_v3_replacer.hpp | 2 +- .../tensor_list_ops_resolver.hpp | 10 ++++---- .../rfft2d_complex_abs.h | 2 +- .../tflite_quantize_resolver.hpp | 4 ++-- 72 files changed, 150 insertions(+), 100 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/base_matcher_pass.hpp b/src/common/low_precision_transformations/include/low_precision/base_matcher_pass.hpp index f3a217e9d25cab..63f69b9e258d51 100644 --- a/src/common/low_precision_transformations/include/low_precision/base_matcher_pass.hpp +++ b/src/common/low_precision_transformations/include/low_precision/base_matcher_pass.hpp @@ -19,6 +19,7 @@ class LP_TRANSFORMATIONS_API BaseMatcherPass; class LP_TRANSFORMATIONS_API 
ov::pass::low_precision::BaseMatcherPass : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("low_precision::BaseMatcherPass"); BaseMatcherPass(const AttributeParameters& params = AttributeParameters()); AttributeParameters params; }; diff --git a/src/common/low_precision_transformations/include/low_precision/convert_subtract_constant.hpp b/src/common/low_precision_transformations/include/low_precision/convert_subtract_constant.hpp index d89384a8cd169f..91734aef48f399 100644 --- a/src/common/low_precision_transformations/include/low_precision/convert_subtract_constant.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convert_subtract_constant.hpp @@ -31,6 +31,6 @@ class LP_TRANSFORMATIONS_API ConvertSubtractConstant; */ class ov::pass::low_precision::ConvertSubtractConstant : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSubtractConstant", "0"); + OPENVINO_MATCHER_PASS_RTTI("low_precision::ConvertSubtractConstant"); ConvertSubtractConstant(const std::vector& constantPrecisions = {}); }; diff --git a/src/common/low_precision_transformations/include/low_precision/create_precisions_dependent_attribute.hpp b/src/common/low_precision_transformations/include/low_precision/create_precisions_dependent_attribute.hpp index c57d15dde899bd..a9fb24840aa19e 100644 --- a/src/common/low_precision_transformations/include/low_precision/create_precisions_dependent_attribute.hpp +++ b/src/common/low_precision_transformations/include/low_precision/create_precisions_dependent_attribute.hpp @@ -40,6 +40,7 @@ class CreatePrecisionsDependentAttribute; template class ov::pass::low_precision::CreatePrecisionsDependentAttribute : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("low_precision::CreatePrecisionsDependentAttribute"); CreatePrecisionsDependentAttribute() { auto operation = pattern::wrap_type(); diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp 
b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index d86dbd9470cc07..952cb3e468a17b 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -244,6 +244,7 @@ inline std::ostream &operator << (std::ostream &os, const DataPrecision& value) */ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("low_precision::LayerTransformation"); class Params { public: Params( diff --git a/src/common/low_precision_transformations/include/low_precision/markup_bias.hpp b/src/common/low_precision_transformations/include/low_precision/markup_bias.hpp index a7f46f74dc0645..a8deb0847d6dff 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_bias.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_bias.hpp @@ -23,10 +23,10 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MarkupBias : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MarkupBias", "0"); + OPENVINO_MATCHER_PASS_RTTI("low_precision::MarkupBias"); MarkupBias(); }; } // namespace low_precision } // namespace pass -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp index 0450091699065d..0a8c945d6c7fa1 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp @@ -38,7 +38,9 @@ class PropagateThroughPrecisionPreserved; template class ov::pass::low_precision::PropagateThroughPrecisionPreserved : public 
ov::pass::MatcherPass { public: - PropagateThroughPrecisionPreserved(const std::vector& defaultPrecisions = precision_set::get_int8_support()) { + OPENVINO_MATCHER_PASS_RTTI("low_precision::PropagateThroughPrecisionPreserved"); + PropagateThroughPrecisionPreserved( + const std::vector& defaultPrecisions = precision_set::get_int8_support()) { ov::graph_rewrite_callback callback = [&](pattern::Matcher& m) { auto node = m.get_match_root(); if (transformation_callback(node)) { diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_to_input.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_to_input.hpp index 3e550b9e950ff3..de30730fd2b151 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_to_input.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_to_input.hpp @@ -37,7 +37,8 @@ class PropagateToInput; template class ov::pass::low_precision::PropagateToInput : public ov::pass::MatcherPass { public: - PropagateToInput(const std::vector& defaultPrecisions = { ov::element::u8, ov::element::i8 }) { + OPENVINO_MATCHER_PASS_RTTI("low_precision::PropagateToInput"); + PropagateToInput(const std::vector& defaultPrecisions = {ov::element::u8, ov::element::i8}) { ov::graph_rewrite_callback callback = [&](pattern::Matcher& m) { auto node = m.get_match_root(); if (transformation_callback(node)) { diff --git a/src/common/low_precision_transformations/include/low_precision/pull_reshape_through_dequantization.hpp b/src/common/low_precision_transformations/include/low_precision/pull_reshape_through_dequantization.hpp index 75d0d3f6510048..9a3d1113cbbe8a 100644 --- a/src/common/low_precision_transformations/include/low_precision/pull_reshape_through_dequantization.hpp +++ b/src/common/low_precision_transformations/include/low_precision/pull_reshape_through_dequantization.hpp @@ -30,6 +30,6 @@ class LP_TRANSFORMATIONS_API PullReshapeThroughDequantization; */ class 
ov::pass::low_precision::PullReshapeThroughDequantization : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PullReshapeThroughDequantization", "0"); + OPENVINO_MATCHER_PASS_RTTI("low_precision::PullReshapeThroughDequantization"); PullReshapeThroughDequantization(const std::vector& inputPrecisions = {}); }; diff --git a/src/common/low_precision_transformations/include/low_precision/pull_transpose_through_dequantization.hpp b/src/common/low_precision_transformations/include/low_precision/pull_transpose_through_dequantization.hpp index 92e9eeebd2be52..8b16c910931230 100644 --- a/src/common/low_precision_transformations/include/low_precision/pull_transpose_through_dequantization.hpp +++ b/src/common/low_precision_transformations/include/low_precision/pull_transpose_through_dequantization.hpp @@ -30,6 +30,6 @@ class LP_TRANSFORMATIONS_API PullTransposeThroughDequantization; */ class ov::pass::low_precision::PullTransposeThroughDequantization : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PullTransposeThroughDequantization", "0"); + OPENVINO_MATCHER_PASS_RTTI("low_precision::PullTransposeThroughDequantization"); PullTransposeThroughDequantization(const std::vector& inputPrecisions = {}); }; diff --git a/src/common/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp b/src/common/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp index 4113e1d86d4bef..e462981380247a 100644 --- a/src/common/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp +++ b/src/common/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp @@ -36,7 +36,9 @@ class UpdateSharedPrecisionPreserved; template class ov::pass::low_precision::UpdateSharedPrecisionPreserved : public ov::pass::MatcherPass { public: - UpdateSharedPrecisionPreserved(const std::vector& defaultPrecisions = precision_set::get_int8_support()) { + 
OPENVINO_MATCHER_PASS_RTTI("low_precision::UpdateSharedPrecisionPreserved"); + UpdateSharedPrecisionPreserved( + const std::vector& defaultPrecisions = precision_set::get_int8_support()) { ov::graph_rewrite_callback callback = [&](ov::pass::pattern::Matcher& m) { auto node = m.get_match_root(); diff --git a/src/common/offline_transformations/include/compress_quantize_weights.hpp b/src/common/offline_transformations/include/compress_quantize_weights.hpp index 597b50828494a5..9b2792caf93d47 100644 --- a/src/common/offline_transformations/include/compress_quantize_weights.hpp +++ b/src/common/offline_transformations/include/compress_quantize_weights.hpp @@ -63,7 +63,7 @@ class CompressWeightsWithFakeConvert; */ class ov::pass::CompressWeightsWithFakeQuantize : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CompressWeightsWithFakeQuantize", "0"); + OPENVINO_MATCHER_PASS_RTTI("CompressWeightsWithFakeQuantize"); CompressWeightsWithFakeQuantize(); }; @@ -95,7 +95,7 @@ class ov::pass::CompressWeightsWithFakeQuantize : public ov::pass::MatcherPass { */ class ov::pass::CompressWeightsWithFakeConvert : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CompressWeightsWithFakeConvert", "0"); + OPENVINO_MATCHER_PASS_RTTI("CompressWeightsWithFakeConvert"); CompressWeightsWithFakeConvert(); }; diff --git a/src/common/offline_transformations/include/pruning.hpp b/src/common/offline_transformations/include/pruning.hpp index c71a84fe18d396..dd5374d7477ded 100644 --- a/src/common/offline_transformations/include/pruning.hpp +++ b/src/common/offline_transformations/include/pruning.hpp @@ -41,7 +41,7 @@ class ov::pass::InitMasks : public ov::pass::GraphRewrite { */ class ov::pass::InitConstMask : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("InitConstMask", "0"); + OPENVINO_MATCHER_PASS_RTTI("InitConstMask"); explicit InitConstMask( const ov::AxisSet& dims, const std::function& condition = [](const double& value) { diff --git 
a/src/common/offline_transformations/src/pruning/init_masks.cpp b/src/common/offline_transformations/src/pruning/init_masks.cpp index df94bfc59889c8..6cdede0b5f8252 100644 --- a/src/common/offline_transformations/src/pruning/init_masks.cpp +++ b/src/common/offline_transformations/src/pruning/init_masks.cpp @@ -22,6 +22,7 @@ class InitMatMulMask; class ov::pass::init_masks::InitConvMask : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("init_masks::InitConvMask"); InitConvMask() { auto input = pattern::any_input(); auto weights = pattern::any_input(); @@ -59,6 +60,7 @@ class ov::pass::init_masks::InitConvMask : public MatcherPass { class ov::pass::init_masks::InitMatMulMask : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("init_masks::InitMatMulMask"); InitMatMulMask() { auto a = pattern::any_input(); auto b = pattern::any_input(); diff --git a/src/common/offline_transformations/src/pruning/propagate_masks.cpp b/src/common/offline_transformations/src/pruning/propagate_masks.cpp index 40e5c98a31a30f..081a49109ec947 100644 --- a/src/common/offline_transformations/src/pruning/propagate_masks.cpp +++ b/src/common/offline_transformations/src/pruning/propagate_masks.cpp @@ -65,6 +65,7 @@ static ov::Shape broadcast_shape_to_rank(ov::Shape shape_to_broadcast, int64_t d class ov::pass::mask_propagation::MatMul : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::MatMul"); MatMul() { auto a = pattern::any_input(pattern::has_static_shape()); auto b = pattern::any_input(pattern::has_static_shape()); @@ -201,6 +202,7 @@ class ov::pass::mask_propagation::MatMul : public MatcherPass { class ov::pass::mask_propagation::Convolution : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::Convolution"); Convolution() { auto input = pattern::any_input(); auto weights = pattern::any_input(pattern::has_static_shape()); @@ -280,6 +282,7 @@ class ov::pass::mask_propagation::Convolution : public MatcherPass { class 
ov::pass::mask_propagation::GroupConvolution : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::GroupConvolution"); GroupConvolution() { auto input = pattern::any_input(pattern::has_static_dim(1)); auto weights = pattern::any_input(pattern::has_static_shape()); @@ -366,6 +369,7 @@ class ov::pass::mask_propagation::GroupConvolution : public MatcherPass { class ov::pass::mask_propagation::GroupConvolutionReshape : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::GroupConvolutionReshape"); GroupConvolutionReshape() { auto input = pattern::any_input(pattern::has_static_shape()); auto shape = pattern::any_input(); @@ -456,6 +460,7 @@ class ov::pass::mask_propagation::GroupConvolutionReshape : public MatcherPass { class ov::pass::mask_propagation::Elementwise : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::Elementwise"); Elementwise() { auto input = pattern::any_input(); auto weights = pattern::any_input(); @@ -646,6 +651,7 @@ class ov::pass::mask_propagation::Elementwise : public MatcherPass { class ov::pass::mask_propagation::FakeQuantize : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::FakeQuantize"); FakeQuantize() { auto input = pattern::any_input(pattern::has_static_shape()); auto input_low = pattern::any_input(pattern::has_static_shape()); @@ -758,6 +764,7 @@ class ov::pass::mask_propagation::FakeQuantize : public MatcherPass { class ov::pass::mask_propagation::Concat : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::Concat"); Concat() { auto concat = pattern::wrap_type(pattern::has_static_shape()); @@ -864,6 +871,7 @@ class ov::pass::mask_propagation::Concat : public MatcherPass { class ov::pass::mask_propagation::PassThrough : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::PassThrough"); PassThrough() { auto unary_op = pattern::wrap_type(); @@ -1117,6 +1126,7 @@ static std::vector 
collect_dims_attrs(const std::vector dims class ov::pass::mask_propagation::Reshape : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::Reshape"); Reshape() { auto inputs = pattern::any_input(pattern::has_static_shape()); auto weights = pattern::any_input(); @@ -1373,6 +1383,7 @@ class ov::pass::mask_propagation::Reshape : public MatcherPass { class ov::pass::mask_propagation::Transpose : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::Transpose"); Transpose() { auto input = pattern::any_input(); auto weights = pattern::any_input(); @@ -1480,6 +1491,7 @@ static ov::Mask::Ptr create_connect_split_output_mask(ov::Mask::Ptr input_mask, class ov::pass::mask_propagation::VariadicSplit : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::VariadicSplit"); VariadicSplit() { auto input_pattern = pattern::any_input(pattern::has_static_rank()); auto axis_pattern = pattern::wrap_type(); @@ -1547,6 +1559,7 @@ class ov::pass::mask_propagation::VariadicSplit : public MatcherPass { class ov::pass::mask_propagation::Split : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::Split"); Split() { auto input_pattern = pattern::any_input(pattern::has_static_rank()); auto axis_pattern = pattern::wrap_type(); @@ -1597,6 +1610,7 @@ class ov::pass::mask_propagation::Split : public MatcherPass { class ov::pass::mask_propagation::StopPropagation : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::StopPropagation"); StopPropagation() { auto any_node = pattern::any_input(); @@ -1654,6 +1668,7 @@ class ov::pass::mask_propagation::StopPropagation : public MatcherPass { class ov::pass::mask_propagation::SkipPropagation : public MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("mask_propagation::SkipPropagation"); SkipPropagation() { // Skip mask propagation for ShapeOf operation to prevent this opearation to be // processed as stop op. 
diff --git a/src/common/snippets/include/snippets/pass/broadcast_to_movebroadcast.hpp b/src/common/snippets/include/snippets/pass/broadcast_to_movebroadcast.hpp index 242d490b2fab05..e0b51364973654 100644 --- a/src/common/snippets/include/snippets/pass/broadcast_to_movebroadcast.hpp +++ b/src/common/snippets/include/snippets/pass/broadcast_to_movebroadcast.hpp @@ -19,10 +19,11 @@ namespace pass { */ class BroadcastToMoveBroadcast: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::BroadcastToMoveBroadcast"); BroadcastToMoveBroadcast(); }; } // namespace pass } // namespace snippets -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/snippets/include/snippets/pass/collapse_subgraph.hpp b/src/common/snippets/include/snippets/pass/collapse_subgraph.hpp index 41236df12aa0ef..3363ec09eb7c39 100644 --- a/src/common/snippets/include/snippets/pass/collapse_subgraph.hpp +++ b/src/common/snippets/include/snippets/pass/collapse_subgraph.hpp @@ -35,7 +35,7 @@ namespace pass { */ class TokenizeSnippets: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TokenizeSnippets", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::TokenizeSnippets"); explicit TokenizeSnippets(const SnippetsTokenization::Config& config); static bool AppropriateForSubgraph(const std::shared_ptr&); diff --git a/src/common/snippets/include/snippets/pass/common_optimizations.hpp b/src/common/snippets/include/snippets/pass/common_optimizations.hpp index 1ff2d6293db649..94224d7800777f 100644 --- a/src/common/snippets/include/snippets/pass/common_optimizations.hpp +++ b/src/common/snippets/include/snippets/pass/common_optimizations.hpp @@ -19,7 +19,7 @@ class CommonOptimizations : public ov::pass::MatcherPass { friend class SplitDimensionM; public: - OPENVINO_RTTI("CommonOptimizations", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::CommonOptimizations"); CommonOptimizations(const SnippetsTokenization::Config& config); }; diff 
--git a/src/common/snippets/include/snippets/pass/convert_constants.hpp b/src/common/snippets/include/snippets/pass/convert_constants.hpp index 629528a1700959..cd53d6db0c82b6 100644 --- a/src/common/snippets/include/snippets/pass/convert_constants.hpp +++ b/src/common/snippets/include/snippets/pass/convert_constants.hpp @@ -19,6 +19,7 @@ namespace pass { */ class ConvertConstantsToScalars: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::ConvertConstantsToScalars"); ConvertConstantsToScalars(); }; diff --git a/src/common/snippets/include/snippets/pass/convert_power_to_powerstatic.hpp b/src/common/snippets/include/snippets/pass/convert_power_to_powerstatic.hpp index a2274837fb7c3a..b3aea4b0cb91ca 100644 --- a/src/common/snippets/include/snippets/pass/convert_power_to_powerstatic.hpp +++ b/src/common/snippets/include/snippets/pass/convert_power_to_powerstatic.hpp @@ -17,9 +17,10 @@ namespace pass { */ class ConvertPowerToPowerStatic: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::ConvertPowerToPowerStatic"); ConvertPowerToPowerStatic(); }; } // namespace pass } // namespace snippets -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/snippets/include/snippets/pass/explicit_transpose_matmul_inputs.hpp b/src/common/snippets/include/snippets/pass/explicit_transpose_matmul_inputs.hpp index 5aeee244a76877..f4f577dd58edcd 100644 --- a/src/common/snippets/include/snippets/pass/explicit_transpose_matmul_inputs.hpp +++ b/src/common/snippets/include/snippets/pass/explicit_transpose_matmul_inputs.hpp @@ -23,7 +23,7 @@ namespace pass { */ class ExplicitTransposeMatMulInputs: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ExplicitTransposeMatMulInputs", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::ExplicitTransposeMatMulInputs"); ExplicitTransposeMatMulInputs(); // Return `True` if all inputs (except 0-th input) have scalar shape. 
Otherwise returns `False` diff --git a/src/common/snippets/include/snippets/pass/extract_reshapes_from_mha.hpp b/src/common/snippets/include/snippets/pass/extract_reshapes_from_mha.hpp index c4c23b6e247951..b4e26de8a12403 100644 --- a/src/common/snippets/include/snippets/pass/extract_reshapes_from_mha.hpp +++ b/src/common/snippets/include/snippets/pass/extract_reshapes_from_mha.hpp @@ -31,7 +31,7 @@ namespace pass { */ class ExtractReshapesFromMHA: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ExtractReshapesFromMHA", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::ExtractReshapesFromMHA"); ExtractReshapesFromMHA(); }; diff --git a/src/common/snippets/include/snippets/pass/fc_tokenization.hpp b/src/common/snippets/include/snippets/pass/fc_tokenization.hpp index 40505607341ba4..540334c3654a9f 100644 --- a/src/common/snippets/include/snippets/pass/fc_tokenization.hpp +++ b/src/common/snippets/include/snippets/pass/fc_tokenization.hpp @@ -18,7 +18,7 @@ namespace pass { */ class TokenizeFCSnippets: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TokenizeFCSnippets", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::TokenizeFCSnippets"); TokenizeFCSnippets(const SnippetsTokenization::Config& config); }; diff --git a/src/common/snippets/include/snippets/pass/fq_decomposition.hpp b/src/common/snippets/include/snippets/pass/fq_decomposition.hpp index 982835b4f27d64..1e4af6c04e22fa 100644 --- a/src/common/snippets/include/snippets/pass/fq_decomposition.hpp +++ b/src/common/snippets/include/snippets/pass/fq_decomposition.hpp @@ -49,6 +49,7 @@ namespace pass { class FakeQuantizeDecomposition : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::FakeQuantizeDecomposition"); FakeQuantizeDecomposition(); static bool getScalesAndShifts(const std::shared_ptr& fq_node, diff --git a/src/common/snippets/include/snippets/pass/fuse_transpose_brgemm.hpp b/src/common/snippets/include/snippets/pass/fuse_transpose_brgemm.hpp index 
d913df46caa890..3a1a2d5488ce6f 100644 --- a/src/common/snippets/include/snippets/pass/fuse_transpose_brgemm.hpp +++ b/src/common/snippets/include/snippets/pass/fuse_transpose_brgemm.hpp @@ -23,7 +23,7 @@ namespace pass { */ class FuseTransposeBrgemm: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseTransposeBrgemm", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::FuseTransposeBrgemm"); FuseTransposeBrgemm(); static bool is_supported_transpose(const Output& transpose_out); @@ -32,4 +32,4 @@ class FuseTransposeBrgemm: public ov::pass::MatcherPass { } // namespace pass } // namespace snippets -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/snippets/include/snippets/pass/gn_decomposition.hpp b/src/common/snippets/include/snippets/pass/gn_decomposition.hpp index 5c7b6478dec15f..bd1c41c20b051d 100644 --- a/src/common/snippets/include/snippets/pass/gn_decomposition.hpp +++ b/src/common/snippets/include/snippets/pass/gn_decomposition.hpp @@ -17,10 +17,10 @@ namespace pass { */ class GNDecomposition: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GNDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::GNDecomposition"); GNDecomposition(); }; } // namespace pass } // namespace snippets -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/snippets/include/snippets/pass/gn_tokenization.hpp b/src/common/snippets/include/snippets/pass/gn_tokenization.hpp index 220f05f0bbbc88..2d97cc9a72b4d2 100644 --- a/src/common/snippets/include/snippets/pass/gn_tokenization.hpp +++ b/src/common/snippets/include/snippets/pass/gn_tokenization.hpp @@ -18,10 +18,10 @@ namespace pass { */ class TokenizeGNSnippets : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TokenizeGNSnippets", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::TokenizeGNSnippets"); TokenizeGNSnippets(); }; } // namespace pass } // namespace snippets -} // namespace ov \ No newline at end of file +} // 
namespace ov diff --git a/src/common/snippets/include/snippets/pass/insert_movebroadcast.hpp b/src/common/snippets/include/snippets/pass/insert_movebroadcast.hpp index 787677a22ed108..9a9f3c3757e909 100644 --- a/src/common/snippets/include/snippets/pass/insert_movebroadcast.hpp +++ b/src/common/snippets/include/snippets/pass/insert_movebroadcast.hpp @@ -18,6 +18,7 @@ namespace pass { */ class InsertMoveBroadcast: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::InsertMoveBroadcast"); InsertMoveBroadcast(); static Output BroadcastNodeLastDim(const ov::Output& value, diff --git a/src/common/snippets/include/snippets/pass/matmul_to_brgemm.hpp b/src/common/snippets/include/snippets/pass/matmul_to_brgemm.hpp index 780b153b4f0487..35d68d15554c2b 100644 --- a/src/common/snippets/include/snippets/pass/matmul_to_brgemm.hpp +++ b/src/common/snippets/include/snippets/pass/matmul_to_brgemm.hpp @@ -19,7 +19,7 @@ namespace pass { */ class MatMulToBrgemm: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MatMulToBrgemm", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::MatMulToBrgemm"); MatMulToBrgemm(); }; diff --git a/src/common/snippets/include/snippets/pass/mha_tokenization.hpp b/src/common/snippets/include/snippets/pass/mha_tokenization.hpp index 78dad6ee8e6e19..a38aaf3d5c8546 100644 --- a/src/common/snippets/include/snippets/pass/mha_tokenization.hpp +++ b/src/common/snippets/include/snippets/pass/mha_tokenization.hpp @@ -40,7 +40,7 @@ namespace pass { */ class TokenizeMHASnippets: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TokenizeMHASnippets", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::TokenizeMHASnippets"); TokenizeMHASnippets(const SnippetsTokenization::Config& config); static std::vector get_fusion_transpose_order(size_t rank); diff --git a/src/common/snippets/include/snippets/pass/reduce_to_snippets_reduce.hpp b/src/common/snippets/include/snippets/pass/reduce_to_snippets_reduce.hpp index 
01d33d6e86cd9c..df5412e5a56f21 100644 --- a/src/common/snippets/include/snippets/pass/reduce_to_snippets_reduce.hpp +++ b/src/common/snippets/include/snippets/pass/reduce_to_snippets_reduce.hpp @@ -18,10 +18,11 @@ namespace pass { */ class ReduceToSnippetsReduce: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::ReduceToSnippetsReduce"); ReduceToSnippetsReduce(); }; } // namespace pass } // namespace snippets -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp b/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp index 8e24c1532a72d3..b604b49dffef73 100644 --- a/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp +++ b/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp @@ -17,7 +17,7 @@ namespace pass { */ class SoftmaxDecomposition: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftmaxDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::SoftmaxDecomposition"); SoftmaxDecomposition(); }; diff --git a/src/common/snippets/include/snippets/pass/softmax_reshape_elimination.hpp b/src/common/snippets/include/snippets/pass/softmax_reshape_elimination.hpp index b43881c1425f84..e1be576cd8418a 100644 --- a/src/common/snippets/include/snippets/pass/softmax_reshape_elimination.hpp +++ b/src/common/snippets/include/snippets/pass/softmax_reshape_elimination.hpp @@ -17,6 +17,7 @@ namespace pass { */ class SoftmaxReshapeElimination: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::SoftmaxReshapeElimination"); SoftmaxReshapeElimination(); }; diff --git a/src/common/snippets/include/snippets/pass/transform_convert.hpp b/src/common/snippets/include/snippets/pass/transform_convert.hpp index 5b087d4ae559eb..e9bb5a5ff40ca1 100644 --- a/src/common/snippets/include/snippets/pass/transform_convert.hpp +++ 
b/src/common/snippets/include/snippets/pass/transform_convert.hpp @@ -19,6 +19,7 @@ namespace pass { */ class TransformConvertToConvertTruncation: public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::TransformConvertToConvertTruncation"); TransformConvertToConvertTruncation(); }; diff --git a/src/common/snippets/include/snippets/pass/transpose_decomposition.hpp b/src/common/snippets/include/snippets/pass/transpose_decomposition.hpp index dbef9bc7b31c93..ea0305c331b88b 100644 --- a/src/common/snippets/include/snippets/pass/transpose_decomposition.hpp +++ b/src/common/snippets/include/snippets/pass/transpose_decomposition.hpp @@ -17,7 +17,7 @@ namespace pass { */ class TransposeDecomposition: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("snippets::pass::TransposeDecomposition"); TransposeDecomposition(); static bool is_supported_transpose(const Output& transpose_out); diff --git a/src/core/include/openvino/core/rtti.hpp b/src/core/include/openvino/core/rtti.hpp index e9681c93e45f1f..758e2de15db181 100644 --- a/src/core/include/openvino/core/rtti.hpp +++ b/src/core/include/openvino/core/rtti.hpp @@ -7,8 +7,9 @@ #include "openvino/core/type.hpp" #include "openvino/core/visibility.hpp" -#define _OPENVINO_RTTI_EXPAND(X) X -#define _OPENVINO_RTTI_DEFINITION_SELECTOR(_1, _2, _3, NAME, ...) NAME +#define _OPENVINO_RTTI_EXPAND(X) X +#define _OPENVINO_RTTI_DEFINITION_SELECTOR_2(_1, _2, NAME, ...) NAME +#define _OPENVINO_RTTI_DEFINITION_SELECTOR_3(_1, _2, _3, NAME, ...) NAME #define _OPENVINO_RTTI_WITH_TYPE(TYPE_NAME) _OPENVINO_RTTI_WITH_TYPE_VERSION(TYPE_NAME, "util") @@ -87,11 +88,11 @@ /// OPENVINO_RTTI(name, version_id) /// OPENVINO_RTTI(name, version_id, parent) /// OPENVINO_RTTI(name, version_id, parent, old_version) -#define OPENVINO_RTTI(...) 
\ - _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR(__VA_ARGS__, \ - _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT, \ - _OPENVINO_RTTI_WITH_TYPE_VERSION, \ - _OPENVINO_RTTI_WITH_TYPE)(__VA_ARGS__)) +#define OPENVINO_RTTI(...) \ + _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR_3(__VA_ARGS__, \ + _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT, \ + _OPENVINO_RTTI_WITH_TYPE_VERSION, \ + _OPENVINO_RTTI_WITH_TYPE)(__VA_ARGS__)) /// Note: Please don't use this macros for new operations #define BWDCMP_RTTI_DECLARATION diff --git a/src/core/include/openvino/op/op.hpp b/src/core/include/openvino/op/op.hpp index 62328429107401..bc172d103c94a2 100644 --- a/src/core/include/openvino/op/op.hpp +++ b/src/core/include/openvino/op/op.hpp @@ -14,18 +14,18 @@ #define _OPENVINO_RTTI_OP_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \ _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT(TYPE_NAME, VERSION_NAME, ::ov::op::Op) -#define OPENVINO_OP(...) \ - _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR(__VA_ARGS__, \ - _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT, \ - _OPENVINO_RTTI_OP_WITH_TYPE_VERSION, \ - _OPENVINO_RTTI_OP_WITH_TYPE)(__VA_ARGS__)) \ - /* Add accessibility for Op to the method: evaluate from the Base class \ - Usually C++ allows to use virtual methods of Base class from Derived class but if they have \ - the same name and not all of them are overrided in Derived class, the only overrided methods \ - will be available from Derived class. We need to explicitly cast Derived to Base class to \ - have an access to remaining methods or use this using. */ \ - using ov::op::Op::evaluate; \ - using ov::op::Op::evaluate_lower; \ +#define OPENVINO_OP(...) 
\ + _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR_3(__VA_ARGS__, \ + _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT, \ + _OPENVINO_RTTI_OP_WITH_TYPE_VERSION, \ + _OPENVINO_RTTI_OP_WITH_TYPE)(__VA_ARGS__)) \ + /* Add accessibility for Op to the method: evaluate from the Base class \ + Usually C++ allows to use virtual methods of Base class from Derived class but if they have \ + the same name and not all of them are overrided in Derived class, the only overrided methods \ + will be available from Derived class. We need to explicitly cast Derived to Base class to \ + have an access to remaining methods or use this using. */ \ + using ov::op::Op::evaluate; \ + using ov::op::Op::evaluate_lower; \ using ov::op::Op::evaluate_upper; namespace ov { diff --git a/src/core/include/openvino/pass/matcher_pass.hpp b/src/core/include/openvino/pass/matcher_pass.hpp index b17237fdf08340..e98f5ff89008fd 100644 --- a/src/core/include/openvino/pass/matcher_pass.hpp +++ b/src/core/include/openvino/pass/matcher_pass.hpp @@ -6,10 +6,22 @@ #include #include -#include +#include +#include +#include "openvino/core/rtti.hpp" #include "openvino/pass/node_registry.hpp" +#define _OPENVINO_MATCHER_PASS_RTTI_WITH_TYPE(TYPE_NAME) _OPENVINO_MATCHER_PASS_RTTI_WITH_TYPE_VERSION(TYPE_NAME, "0") + +#define _OPENVINO_MATCHER_PASS_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \ + _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT(TYPE_NAME, VERSION_NAME, ::ov::pass::MatcherPass) + +#define OPENVINO_MATCHER_PASS_RTTI(...) 
\ + _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR_2(__VA_ARGS__, \ + _OPENVINO_MATCHER_PASS_RTTI_WITH_TYPE_VERSION, \ + _OPENVINO_MATCHER_PASS_RTTI_WITH_TYPE)(__VA_ARGS__)) + namespace ov { using matcher_pass_callback = std::function; using graph_rewrite_callback = std::function; diff --git a/src/core/tests/graph_rewrite.cpp b/src/core/tests/graph_rewrite.cpp index 3043e851aaf1d9..20955f5a5d6b1f 100644 --- a/src/core/tests/graph_rewrite.cpp +++ b/src/core/tests/graph_rewrite.cpp @@ -23,7 +23,7 @@ using namespace ov::pass; class TestPass : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TestPass"); + OPENVINO_MATCHER_PASS_RTTI("TestPass"); TestPass() : MatcherPass() { auto divide = std::make_shared(element::f32, Shape{}, @@ -44,7 +44,7 @@ class TestPass : public ov::pass::MatcherPass { class GatherNodesPass : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GatherNodesPass"); + OPENVINO_MATCHER_PASS_RTTI("GatherNodesPass"); GatherNodesPass(NodeVector& order) : MatcherPass() { ov::matcher_pass_callback callback = [&order](pattern::Matcher& m) { order.push_back(m.get_match_root()); @@ -187,6 +187,7 @@ TEST(GraphRewriteTest, MatcherPassCallbackDerived) { class TypeBasedTestPass : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("TypeBasedTestPass"); TypeBasedTestPass() : MatcherPass() { auto divide = std::make_shared(std::make_shared(), std::make_shared()); @@ -207,6 +208,7 @@ class TypeBasedTestPass : public ov::pass::MatcherPass { class TypeBasedTestPassDerived : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("TypeBasedTestPassDerived"); TypeBasedTestPassDerived() : MatcherPass() { auto divide = std::make_shared(std::make_shared(), std::make_shared()); @@ -388,7 +390,7 @@ TEST(PassConfigTest, Test1) { class CheckConsumers : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CheckConsumers"); + OPENVINO_MATCHER_PASS_RTTI("CheckConsumers"); CheckConsumers() { ov::matcher_pass_callback callback = 
[](pattern::Matcher& m) -> bool { auto node = m.get_match_root(); diff --git a/src/core/tests/matcher_pass.cpp b/src/core/tests/matcher_pass.cpp index b845f496461193..ec9e5efbcdf9e8 100644 --- a/src/core/tests/matcher_pass.cpp +++ b/src/core/tests/matcher_pass.cpp @@ -21,6 +21,7 @@ using namespace std; class TestMatcherPass : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("TestMatcherPass"); TestMatcherPass() { auto m_relu1 = ov::pass::pattern::wrap_type(pattern::consumers_count(1)); auto m_relu2 = ov::pass::pattern::wrap_type({m_relu1}); diff --git a/src/core/tests/pass_config.cpp b/src/core/tests/pass_config.cpp index 566534d4f46ce4..15ebc71eef10a6 100644 --- a/src/core/tests/pass_config.cpp +++ b/src/core/tests/pass_config.cpp @@ -19,7 +19,7 @@ using namespace ov::pass; class RenameReLU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RanameReLU"); + OPENVINO_MATCHER_PASS_RTTI("RenameReLU"); RenameReLU() : MatcherPass() { auto relu = ov::pass::pattern::wrap_type(); ov::matcher_pass_callback callback = [](pattern::Matcher& m) { @@ -35,7 +35,7 @@ class RenameReLU : public ov::pass::MatcherPass { class RenameSigmoid : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RenameSigmoid"); + OPENVINO_MATCHER_PASS_RTTI("RenameSigmoid"); RenameSigmoid() : MatcherPass() { auto sigmoid = pattern::wrap_type(); ov::matcher_pass_callback callback = [](pattern::Matcher& m) { @@ -259,7 +259,7 @@ TEST(PassConfig, EnableDisablePasses9) { class TestNestedMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TestNestedMatcher"); + OPENVINO_MATCHER_PASS_RTTI("TestNestedMatcher"); TestNestedMatcher() : MatcherPass() { auto any_op = pattern::any_input(); ov::matcher_pass_callback callback = [this](pattern::Matcher& m) { diff --git a/src/frontends/common/src/extension/decoder_transformation.cpp b/src/frontends/common/src/extension/decoder_transformation.cpp index 561de1aacd79f9..4533fb89d85651 100644 --- 
a/src/frontends/common/src/extension/decoder_transformation.cpp +++ b/src/frontends/common/src/extension/decoder_transformation.cpp @@ -25,6 +25,7 @@ class CustomModelPass : public ov::pass::ModelPass { /// \brief Helper class to register user matcher pass initialization as a MatcherPass class CustomMatcherPass : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("frontend::CustomMatcherPass"); explicit CustomMatcherPass(const std::function& matcher_pass_initializer) { matcher_pass_initializer(this); } diff --git a/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp b/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp index 6d45edd8ea818a..19abfcbf260d73 100644 --- a/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp +++ b/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp @@ -14,7 +14,7 @@ namespace pass { class TransformFakeQuantize : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::paddle::pass::TransformFakeQuantize"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::paddle::pass::TransformFakeQuantize"); TransformFakeQuantize(); private: @@ -23,4 +23,4 @@ class TransformFakeQuantize : public ov::pass::MatcherPass { } // namespace pass } // namespace paddle } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/paddle/src/internal/pass/transform_if.hpp b/src/frontends/paddle/src/internal/pass/transform_if.hpp index 98c66800d6fea6..f71c2b026fd3e4 100644 --- a/src/frontends/paddle/src/internal/pass/transform_if.hpp +++ b/src/frontends/paddle/src/internal/pass/transform_if.hpp @@ -14,7 +14,7 @@ namespace pass { class TransformIf : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::paddle::pass::TransformIf"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::paddle::pass::TransformIf"); TransformIf(std::vector> functions); private: diff --git 
a/src/frontends/paddle/src/internal/pass/transform_tensorarray.hpp b/src/frontends/paddle/src/internal/pass/transform_tensorarray.hpp index c992bf1eefa4c9..227ce708ad6503 100644 --- a/src/frontends/paddle/src/internal/pass/transform_tensorarray.hpp +++ b/src/frontends/paddle/src/internal/pass/transform_tensorarray.hpp @@ -14,7 +14,7 @@ namespace pass { class TransformTensorArray : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::paddle::pass::TransformTensorArray"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::paddle::pass::TransformTensorArray"); TransformTensorArray(std::vector> functions); private: diff --git a/src/frontends/paddle/src/internal/pass/transform_while.hpp b/src/frontends/paddle/src/internal/pass/transform_while.hpp index de6f381222a554..9a604f520168fe 100644 --- a/src/frontends/paddle/src/internal/pass/transform_while.hpp +++ b/src/frontends/paddle/src/internal/pass/transform_while.hpp @@ -14,7 +14,7 @@ namespace pass { class TransformWhile : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::paddle::pass::TransformWhile"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::paddle::pass::TransformWhile"); TransformWhile(std::vector> functions); private: diff --git a/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp index d3ecd8b28fc636..84b28c8c7e21d3 100644 --- a/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class AppendListUnpackReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AppendListUnpackReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AppendListUnpackReplacer"); AppendListUnpackReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp b/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp 
index 8041e282445353..ef2d06da848132 100644 --- a/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp @@ -15,11 +15,11 @@ namespace pass { // This transformation replaces pattern prim::ListConstruct->aten::append{none or many}->aten::cat class AtenCatToConcat : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenCatToConcat"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AtenCatToConcat"); AtenCatToConcat(); }; } // namespace pass } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp index db99e2d65b2ef1..3d6de2c76b2c83 100644 --- a/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class AtenGetItemReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenGetItemReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AtenGetItemReplacer"); AtenGetItemReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.hpp b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.hpp index e74cf40e057bf2..b641ca3146d0c0 100644 --- a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.hpp @@ -15,7 +15,7 @@ namespace pass { class PYTORCH_API AtenIndexPutReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenIndexPutReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AtenIndexPutReplacer"); AtenIndexPutReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/aten_index_replacer.hpp 
b/src/frontends/pytorch/src/transforms/aten_index_replacer.hpp index b9a034e3a2721f..67afefbef53f57 100644 --- a/src/frontends/pytorch/src/transforms/aten_index_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/aten_index_replacer.hpp @@ -16,7 +16,7 @@ namespace pass { // This transformation replaces pattern prim::ListConstruct->aten::index class PYTORCH_API AtenIndexToSelect : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenIndexToSelect"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AtenIndexToSelect"); AtenIndexToSelect(); }; diff --git a/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.hpp b/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.hpp index ab7376619c4469..51b9832c2e35ae 100644 --- a/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class AtenStackListConstructReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenStackListConstructReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AtenStackListConstructReplacer"); AtenStackListConstructReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/einsum_list_construct.hpp b/src/frontends/pytorch/src/transforms/einsum_list_construct.hpp index 413c9f526214ae..ba792cbbb820af 100644 --- a/src/frontends/pytorch/src/transforms/einsum_list_construct.hpp +++ b/src/frontends/pytorch/src/transforms/einsum_list_construct.hpp @@ -14,11 +14,11 @@ namespace pass { class AtenEinsumListConstructReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenEinsumListConstructReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::AtenEinsumListConstructReplacer"); AtenEinsumListConstructReplacer(); }; } // namespace pass } // namespace pytorch } // 
namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/index_loop_getitem_replacer.hpp b/src/frontends/pytorch/src/transforms/index_loop_getitem_replacer.hpp index 89627723c3d515..dac4bdafa09d27 100644 --- a/src/frontends/pytorch/src/transforms/index_loop_getitem_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/index_loop_getitem_replacer.hpp @@ -18,7 +18,7 @@ namespace pass { */ class IndexLoopGetitemReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::IndexLoopGetitemReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::IndexLoopGetitemReplacer"); IndexLoopGetitemReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp index 3aa6991aed5d4d..d3a5738a82ddbf 100644 --- a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class IRFFTNComplexReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::IRFFTNComplexReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::IRFFTNComplexReplacer"); IRFFTNComplexReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/listconstruct_replacer.hpp b/src/frontends/pytorch/src/transforms/listconstruct_replacer.hpp index 4b265d58d24541..49dac1f83d112a 100644 --- a/src/frontends/pytorch/src/transforms/listconstruct_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/listconstruct_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class ListConstructReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::ListConstructReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::ListConstructReplacer"); ListConstructReplacer(); }; diff --git 
a/src/frontends/pytorch/src/transforms/min_max_prim_list_construct_replacer.hpp b/src/frontends/pytorch/src/transforms/min_max_prim_list_construct_replacer.hpp index 371b3be7ff7cd0..f8dc9a2037a130 100644 --- a/src/frontends/pytorch/src/transforms/min_max_prim_list_construct_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/min_max_prim_list_construct_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class MinMaxPrimListConstructReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::MinMaxPrimListConstructReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::MinMaxPrimListConstructReplacer"); MinMaxPrimListConstructReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/prim_list_construct_pad.hpp b/src/frontends/pytorch/src/transforms/prim_list_construct_pad.hpp index 5e5322969f5285..bbd494f2b97b98 100644 --- a/src/frontends/pytorch/src/transforms/prim_list_construct_pad.hpp +++ b/src/frontends/pytorch/src/transforms/prim_list_construct_pad.hpp @@ -14,11 +14,11 @@ namespace pass { class PrimListConstructPadReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::PrimListConstructPadReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::PrimListConstructPadReplacer"); PrimListConstructPadReplacer(); }; } // namespace pass } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp index 81521716a23430..449adc8a78779d 100644 --- a/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp @@ -14,11 +14,11 @@ namespace pass { class PrimListUnpackReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::PrimListUnpackReplacer"); + 
OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::PrimListUnpackReplacer"); PrimListUnpackReplacer(); }; } // namespace pass } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/quantized_node_remover.hpp b/src/frontends/pytorch/src/transforms/quantized_node_remover.hpp index e4ca3d5806d494..2ae26866f4fabb 100644 --- a/src/frontends/pytorch/src/transforms/quantized_node_remover.hpp +++ b/src/frontends/pytorch/src/transforms/quantized_node_remover.hpp @@ -20,7 +20,7 @@ namespace pass { */ class QuantizedNodeRemover : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::QuantizedNodeRemover"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::QuantizedNodeRemover"); QuantizedNodeRemover(); }; diff --git a/src/frontends/pytorch/src/transforms/remove_packing_ops.hpp b/src/frontends/pytorch/src/transforms/remove_packing_ops.hpp index 773100dfc35af9..a236a3fd081568 100644 --- a/src/frontends/pytorch/src/transforms/remove_packing_ops.hpp +++ b/src/frontends/pytorch/src/transforms/remove_packing_ops.hpp @@ -17,7 +17,7 @@ namespace pass { */ class MovePackThroughLstm : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::MovePackThroughLstm"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::MovePackThroughLstm"); MovePackThroughLstm(); }; @@ -26,7 +26,7 @@ class MovePackThroughLstm : public ov::pass::MatcherPass { */ class RemovePackingOps : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::RemovePackingOps"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::RemovePackingOps"); RemovePackingOps(); }; diff --git a/src/frontends/pytorch/src/transforms/reverseprop_resolver.hpp b/src/frontends/pytorch/src/transforms/reverseprop_resolver.hpp index a26249e4841d4b..8bc3109e479cf5 100644 --- 
a/src/frontends/pytorch/src/transforms/reverseprop_resolver.hpp +++ b/src/frontends/pytorch/src/transforms/reverseprop_resolver.hpp @@ -17,7 +17,7 @@ namespace pass { */ class ReversepropResolver : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::ReversepropResolver"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::ReversepropResolver"); ReversepropResolver(); }; diff --git a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp index 04ec53ab0f1561..5115e38bdf55b1 100644 --- a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class RFFTNComplexReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::RFFTNComplexReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::RFFTNComplexReplacer"); RFFTNComplexReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/string_equality_replacer.hpp b/src/frontends/pytorch/src/transforms/string_equality_replacer.hpp index 20dc3cc98b7f32..dfc826dfd600c0 100644 --- a/src/frontends/pytorch/src/transforms/string_equality_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/string_equality_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class StringEqualityReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::StringEqualityReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::StringEqualityReplacer"); StringEqualityReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.hpp b/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.hpp index 046a774e56ef8e..a77616b53813be 100644 --- a/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.hpp +++ 
b/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.hpp @@ -15,7 +15,7 @@ namespace pass { // This transformation replaces the GPTQ pattern with a Constant node class GPTQDecompressionReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::GPTQDecompressionReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::GPTQDecompressionReplacer"); GPTQDecompressionReplacer(); }; @@ -24,7 +24,7 @@ class GPTQDecompressionReplacer : public ov::pass::MatcherPass { // additional optimizations class GPTQMultPatternReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::GPTQMultPatternReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::GPTQMultPatternReplacer"); GPTQMultPatternReplacer(); }; diff --git a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp index 8d737c3d15947d..625b986f3b64b7 100644 --- a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class PrimTupleUnpackReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::PrimTupleUnpackReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::PrimTupleUnpackReplacer"); PrimTupleUnpackReplacer(); }; @@ -27,4 +27,4 @@ class TupleUnpackInBodyReplacer : public ov::pass::ModelPass { } // namespace pass } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/u4_block_repack.hpp b/src/frontends/pytorch/src/transforms/u4_block_repack.hpp index 99742ff148813a..891fd93554f558 100644 --- a/src/frontends/pytorch/src/transforms/u4_block_repack.hpp +++ b/src/frontends/pytorch/src/transforms/u4_block_repack.hpp @@ -14,13 +14,13 @@ namespace pass { class 
U4BlockRepack : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::U4BlockRepack"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::U4BlockRepack"); U4BlockRepack(bool is_symmetrical = false); }; class U4ConvertReshape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::U4ConvertReshape"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::U4ConvertReshape"); U4ConvertReshape(); }; diff --git a/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.hpp b/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.hpp index 5e0f8bd6dfdec0..30aadee2776b9e 100644 --- a/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.hpp +++ b/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.hpp @@ -19,7 +19,7 @@ namespace pass { // it borrows value of Variable that was used for some state (or node) in a graph class UninitializedVariableResolver : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::UninitializedVariableResolver"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::UninitializedVariableResolver"); UninitializedVariableResolver(); }; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/embedding_segments_feature_fusing.hpp b/src/frontends/tensorflow_common/include/helper_transforms/embedding_segments_feature_fusing.hpp index ec2ce348cb5a1b..696242f321e733 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/embedding_segments_feature_fusing.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/embedding_segments_feature_fusing.hpp @@ -20,7 +20,7 @@ namespace pass { // Such sub-graph is met in the Wide and Deep model in case of the SINGLE categorical feature. 
class EmbeddingSegmentSingleFeatureFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::EmbeddingSegmentSingleFeatureFusion"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::EmbeddingSegmentSingleFeatureFusion"); EmbeddingSegmentSingleFeatureFusion(); }; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/tensor_array_v3_replacer.hpp b/src/frontends/tensorflow_common/include/helper_transforms/tensor_array_v3_replacer.hpp index ad442e3e5dbe29..e0f7c20c11c9b9 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/tensor_array_v3_replacer.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/tensor_array_v3_replacer.hpp @@ -19,7 +19,7 @@ namespace pass { // that simulates initial state of tensor array container class TensorArrayV3Replacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::TensorArrayV3Replacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::TensorArrayV3Replacer"); TensorArrayV3Replacer(); }; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/tensor_list_ops_resolver.hpp b/src/frontends/tensorflow_common/include/helper_transforms/tensor_list_ops_resolver.hpp index 764b7dfc472d2a..cb587d7f665c7b 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/tensor_list_ops_resolver.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/tensor_list_ops_resolver.hpp @@ -15,14 +15,14 @@ namespace pass { // Replace internal operation TensorListReserve with a sub-graph producing initial container class TensorListReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::TensorListReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::TensorListReplacer"); TensorListReplacer(); }; // Replace internal operation TensorListSetItem with a sub-graph that inserts a new tensor into container class 
TensorListSetItemReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::TensorListSetItemReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::TensorListSetItemReplacer"); TensorListSetItemReplacer(); }; @@ -30,14 +30,14 @@ class TensorListSetItemReplacer : public ov::pass::MatcherPass { // that inserts a new tensor into the tail of the container class TensorListPushBackReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::TensorListPushBackReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::TensorListPushBackReplacer"); TensorListPushBackReplacer(); }; // Replace internal operation TensorListGetItem with a sub-graph that gets a tensor from container by index class TensorListGetItemReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::TensorListGetItemReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::TensorListGetItemReplacer"); TensorListGetItemReplacer(); }; @@ -45,7 +45,7 @@ class TensorListGetItemReplacer : public ov::pass::MatcherPass { // Replace TensorListSetItem and TensorListGetItem with ConcatOutput and SlicedInput class TensorListInLoopOptimization : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::TensorListInLoopOptimization"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow::pass::TensorListInLoopOptimization"); TensorListInLoopOptimization(); }; diff --git a/src/frontends/tensorflow_lite/src/tflite_transformations/rfft2d_complex_abs.h b/src/frontends/tensorflow_lite/src/tflite_transformations/rfft2d_complex_abs.h index f8599e2c7791a3..11e79cfe09a58c 100644 --- a/src/frontends/tensorflow_lite/src/tflite_transformations/rfft2d_complex_abs.h +++ b/src/frontends/tensorflow_lite/src/tflite_transformations/rfft2d_complex_abs.h @@ -24,7 +24,7 @@ namespace pass { // \-(imag)-> Unsqueeze -> Reshape -> Square / class 
Rfft2dSimplifier : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow_lite::pass::Rfft2dSimplifier"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow_lite::pass::Rfft2dSimplifier"); Rfft2dSimplifier(); }; diff --git a/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp b/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp index 45fd3e70722d54..584e8c55b6a9ea 100644 --- a/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp +++ b/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp @@ -18,14 +18,14 @@ namespace pass { // Fuses Convert into TFLQuantize operation class TFLQuantizeConvert : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeConvert"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeConvert"); TFLQuantizeConvert(); }; // Replaces TFLQuantize operation with FQ or sub-mul pattern if necessary class TFLQuantizeReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeReplacer"); + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeReplacer"); TFLQuantizeReplacer(); }; From 7c34fbdfc2580ad66d97a34a5c68e02130bf6cd8 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Wed, 18 Dec 2024 13:43:31 +0800 Subject: [PATCH 03/60] support offline CPU in Linux (#27870) ### Details: - *support offline CPU in Linux* - *Ignore SOC Ecore of MTL* - *enable Ecore of LNL* ### Tickets: - *CVS-154222* - *[issues-26889](https://github.com/openvinotoolkit/openvino/issues/26889)* --- src/inference/src/os/lin/lin_system_conf.cpp | 363 ++++++++++-------- .../cpu_map_parser/cache_parser_linux.cpp | 245 ++++++++++++ .../unit/cpu_map_parser/freq_parser_linux.cpp | 183 +++++++++ 3 files changed, 641 insertions(+), 150 deletions(-) diff --git 
a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index f8bd16173b8fce..48d486d2ed2d1b 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -23,76 +23,108 @@ CPU::CPU() { std::vector> system_info_table; std::vector node_info_table; - auto get_cache_info_linux = [&]() { + constexpr int cache_info_mode = 1; + constexpr int freq_info_mode = 2; + + auto get_info_linux = [&](int mode) { int cpu_index = 0; int cache_index = 0; int cache_files = 3; - std::vector one_info(cache_files); + std::string one_info; - while (1) { - for (int n = 0; n < cache_files; n++) { - cache_index = (n == 0) ? n : n + 1; - - std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + "/cache/index" + - std::to_string(cache_index) + "/shared_cpu_list"); - if (!cache_file.is_open()) { - cache_index = -1; - break; - } - std::string cache_info; - std::getline(cache_file, cache_info); - one_info[n] = std::move(cache_info); - } + std::vector file_name = {"/topology/core_cpus_list", + "/topology/physical_package_id", + "/cpufreq/cpuinfo_max_freq"}; + int num_of_files = file_name.size(); - if (cache_index == -1) { - if (cpu_index == 0) { - return -1; - } else { - return 0; - } - } else { - system_info_table.push_back(one_info); - cpu_index++; - } + std::string::size_type pos = 0; + std::string::size_type endpos = 0; + std::string sub_str; + + int core_1; + int core_2; + + system_info_table.clear(); + + std::ifstream possible_file("/sys/devices/system/cpu/possible"); + std::string possible_info; + + if (possible_file.is_open()) { + std::getline(possible_file, possible_info); + } else { + return -1; } - return 0; - }; + if ((endpos = possible_info.find('-', pos)) != std::string::npos) { + sub_str = possible_info.substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = possible_info.substr(endpos + 1); + core_2 = std::stoi(sub_str); + 
system_info_table.resize(core_2 + 1, std::vector(cache_files, "")); + } else { + return -1; + } - auto get_freq_info_linux = [&]() { - int cpu_index = 0; - int cache_index = 0; + std::ifstream online_file("/sys/devices/system/cpu/online"); + std::string online_info; - std::vector file_name = {"/topology/core_cpus_list", - "/topology/physical_package_id", - "/cpufreq/cpuinfo_max_freq"}; - int num_of_files = file_name.size(); - std::vector one_info(num_of_files); + if (online_file.is_open()) { + std::getline(online_file, online_info); + } else { + system_info_table.clear(); + return -1; + } while (1) { - for (int n = 0; n < num_of_files; n++) { - cache_index = n; + if ((endpos = online_info.find('-', pos)) != std::string::npos) { + sub_str = online_info.substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = online_info.substr(endpos + 1); + core_2 = std::stoi(sub_str); - std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + file_name[n]); - if (!cache_file.is_open()) { - cache_index = -1; - break; + for (cpu_index = core_1; cpu_index <= core_2; cpu_index++) { + if (mode == cache_info_mode) { + for (int n = 0; n < cache_files; n++) { + cache_index = (n == 0) ? 
n : n + 1; + one_info.clear(); + + std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + + "/cache/index" + std::to_string(cache_index) + "/shared_cpu_list"); + if (cache_file.is_open()) { + std::getline(cache_file, one_info); + } else { + if ((cpu_index == core_1) && (n == 0)) { + system_info_table.clear(); + return -1; + } + } + system_info_table[cpu_index][n] = std::move(one_info); + } + } else { + for (int n = 0; n < num_of_files; n++) { + one_info.clear(); + + std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + + file_name[n]); + if (cache_file.is_open()) { + std::getline(cache_file, one_info); + } else { + if ((cpu_index == core_1) && (n == 2)) { + system_info_table.clear(); + return -1; + } + } + system_info_table[cpu_index][n] = std::move(one_info); + } + } } - std::string cache_info; - std::getline(cache_file, cache_info); - one_info[n] = std::move(cache_info); } - if (cache_index == -1) { - if (cpu_index == 0) { - return -1; - } else { - return 0; - } + if ((pos = online_info.find(',', endpos)) != std::string::npos) { + pos++; } else { - system_info_table.push_back(one_info); - cpu_index++; + break; } } @@ -201,7 +233,7 @@ CPU::CPU() { get_node_info_linux(); - if (!get_cache_info_linux()) { + if (!get_info_linux(cache_info_mode)) { parse_cache_info_linux(system_info_table, node_info_table, _processors, @@ -215,7 +247,7 @@ CPU::CPU() { if ((_proc_type_table.size() == 0) || ((_proc_type_table[0][MAIN_CORE_PROC] == 0) && (_proc_type_table[0][ALL_PROC] > 0) && (_proc_type_table[0][ALL_PROC] != _proc_type_table[0][EFFICIENT_CORE_PROC]))) { - if (!get_freq_info_linux()) { + if (!get_info_linux(freq_info_mode)) { parse_freq_info_linux(system_info_table, node_info_table, _processors, @@ -471,56 +503,73 @@ void parse_cache_info_linux(const std::vector> system_i const std::vector line_value_0({0, 0, 0, 0, -1, -1}); - for (int n = 0; n < _processors; n++) { - if (-1 == 
_cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { - std::string::size_type pos = 0; - std::string::size_type endpos = 0; - std::string sub_str; - - int core_1; - int core_2; + std::vector offline_list; + int info_index = 0; - if (0 == _sockets) { - _proc_type_table.push_back(line_value_0); - } else { - _proc_type_table.push_back(_proc_type_table[0]); - _proc_type_table[0] = line_value_0; - } - - while (1) { - if ((endpos = system_info_table[n][2].find('-', pos)) != std::string::npos) { - sub_str = system_info_table[n][2].substr(pos, endpos - pos); - core_1 = std::stoi(sub_str); - sub_str = system_info_table[n][2].substr(endpos + 1); - core_2 = std::stoi(sub_str); + for (int n = 0; n < _processors; n++) { + if ((system_info_table[n][2].size() > 0) || (system_info_table[n][1].size() > 0)) { + info_index = system_info_table[n][2].size() > 0 ? 2 : 1; + if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { + std::string::size_type pos = 0; + std::string::size_type endpos = 0; + std::string sub_str; + + int core_1; + int core_2; + + if (0 == _sockets) { + _proc_type_table.push_back(line_value_0); + } else { + _proc_type_table.push_back(_proc_type_table[0]); + _proc_type_table[0] = line_value_0; + } - for (int m = core_1; m <= core_2; m++) { - _cpu_mapping_table[m][CPU_MAP_SOCKET_ID] = _sockets; - _cpu_mapping_table[m][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[m][CPU_MAP_SOCKET_ID]; - update_proc_map_info(m); + while (1) { + if ((endpos = system_info_table[n][info_index].find('-', pos)) != std::string::npos) { + sub_str = system_info_table[n][info_index].substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = system_info_table[n][info_index].substr(endpos + 1); + core_2 = std::stoi(sub_str); + + if ((info_index == 1) && (core_2 - core_1 == 1)) { + offline_list.push_back(n); + break; + } + for (int m = core_1; m <= core_2; m++) { + _cpu_mapping_table[m][CPU_MAP_SOCKET_ID] = _sockets; + _cpu_mapping_table[m][CPU_MAP_NUMA_NODE_ID] = 
_cpu_mapping_table[m][CPU_MAP_SOCKET_ID]; + update_proc_map_info(m); + if (_processors == 0) { + return; + }; + } + } else if (pos != std::string::npos) { + sub_str = system_info_table[n][info_index].substr(pos); + core_1 = std::stoi(sub_str); + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = _sockets; + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + update_proc_map_info(core_1); if (_processors == 0) { return; }; + endpos = pos; } - } else if (pos != std::string::npos) { - sub_str = system_info_table[n][2].substr(pos); - core_1 = std::stoi(sub_str); - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = _sockets; - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - update_proc_map_info(core_1); - if (_processors == 0) { - return; - }; - endpos = pos; - } - if ((pos = system_info_table[n][2].find(',', endpos)) != std::string::npos) { - pos++; - } else { - break; + if ((pos = system_info_table[n][2].find(',', endpos)) != std::string::npos) { + pos++; + } else { + break; + } + } + _sockets++; + if (_proc_type_table[0][ALL_PROC] == 0) { + _proc_type_table.erase(_proc_type_table.begin()); + _sockets--; } } - _sockets++; + } else { + offline_list.push_back(n); } } @@ -540,6 +589,11 @@ void parse_cache_info_linux(const std::vector> system_i _numa_nodes = node_info_table.size(); parse_node_info_linux(node_info_table, _numa_nodes, _sockets, _proc_type_table, _cpu_mapping_table); } + + for (size_t n = 0; n < offline_list.size(); n++) { + _cpu_mapping_table.erase(_cpu_mapping_table.begin() + offline_list[n] - n); + _processors--; + } }; void get_cpu_mapping_from_cores(const int _processors, @@ -615,7 +669,6 @@ void parse_freq_info_linux(const std::vector> system_in std::vector>& _cpu_mapping_table) { int freq_max = 0; bool ecore_enabled = false; - bool ht_enabled = false; _processors = system_info_table.size(); _numa_nodes = 0; @@ -625,6 +678,8 @@ void 
parse_freq_info_linux(const std::vector> system_in std::vector line_value_0(PROC_TYPE_TABLE_SIZE, 0); + std::vector offline_list; + auto clean_up_output = [&]() { _processors = 0; _cores = 0; @@ -636,65 +691,68 @@ void parse_freq_info_linux(const std::vector> system_in }; for (int n = 0; n < _processors; n++) { - if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { - std::string::size_type pos = 0; - std::string::size_type endpos1 = 0; - std::string::size_type endpos2 = 0; - std::string sub_str; - - int core_1 = 0; - int core_2 = 0; - - if (((endpos1 = system_info_table[n][0].find(',', pos)) != std::string::npos) || - ((endpos2 = system_info_table[n][0].find('-', pos)) != std::string::npos)) { - endpos1 = (endpos1 != std::string::npos) ? endpos1 : endpos2; - sub_str = system_info_table[n][0].substr(pos, endpos1 - pos); - core_1 = std::stoi(sub_str); - sub_str = system_info_table[n][0].substr(endpos1 + 1); - core_2 = std::stoi(sub_str); - if ((core_1 != n) && (core_2 != n)) { - clean_up_output(); - return; - } - - _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = HYPER_THREADING_PROC; - _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + if (system_info_table[n][2].size() > 0) { + if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { + std::string::size_type pos = 0; + std::string::size_type endpos1 = 0; + std::string::size_type endpos2 = 0; + std::string sub_str; + + int core_1 = 0; + int core_2 = 0; + + if (((endpos1 = system_info_table[n][0].find(',', pos)) != std::string::npos) || + ((endpos2 = system_info_table[n][0].find('-', pos)) != std::string::npos)) { + endpos1 = (endpos1 != std::string::npos) ? 
endpos1 : endpos2; + sub_str = system_info_table[n][0].substr(pos, endpos1 - pos); + core_1 = std::stoi(sub_str); + sub_str = system_info_table[n][0].substr(endpos1 + 1); + core_2 = std::stoi(sub_str); + if ((core_1 != n) && (core_2 != n)) { + clean_up_output(); + return; + } - _cpu_mapping_table[core_2][CPU_MAP_PROCESSOR_ID] = core_2; - _cpu_mapping_table[core_2][CPU_MAP_SOCKET_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_2][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_2][CPU_MAP_CORE_ID] = _cpu_mapping_table[core_1][CPU_MAP_CORE_ID]; - _cpu_mapping_table[core_2][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; - _cpu_mapping_table[core_2][CPU_MAP_GROUP_ID] = _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID]; + _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = HYPER_THREADING_PROC; + _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + + _cpu_mapping_table[core_2][CPU_MAP_PROCESSOR_ID] = core_2; + _cpu_mapping_table[core_2][CPU_MAP_SOCKET_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_2][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_2][CPU_MAP_CORE_ID] = _cpu_mapping_table[core_1][CPU_MAP_CORE_ID]; + _cpu_mapping_table[core_2][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; + _cpu_mapping_table[core_2][CPU_MAP_GROUP_ID] = _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID]; + + int core_freq = std::stoi(system_info_table[core_1][2]); + freq_max = std::max(core_freq, freq_max); + } else if (system_info_table[n][0].size() > 0) { + core_1 = std::stoi(system_info_table[n][0]); - ht_enabled = true; - int core_freq = 
std::stoi(system_info_table[core_1][2]); - freq_max = std::max(core_freq, freq_max); - } else if (system_info_table[n][0].size() > 0) { - core_1 = std::stoi(system_info_table[n][0]); + _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; - _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + int core_freq = std::stoi(system_info_table[core_1][2]); + if ((0 == freq_max) || (core_freq >= freq_max * 0.97)) { + freq_max = std::max(core_freq, freq_max); + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; + } else { + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = EFFICIENT_CORE_PROC; + ecore_enabled = true; + } - int core_freq = std::stoi(system_info_table[core_1][2]); - if (((0 == freq_max) || (core_freq >= freq_max * 0.95)) && (!ht_enabled)) { - freq_max = std::max(core_freq, freq_max); - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; - } else { - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = EFFICIENT_CORE_PROC; - ecore_enabled = true; + _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; } - - _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + _sockets = std::max(_sockets, _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]); + _cores++; } - _sockets = std::max(_sockets, _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]); - _cores++; + } else { + offline_list.push_back(n); } } @@ -733,6 +791,11 @@ void parse_freq_info_linux(const std::vector> system_in _numa_nodes = node_info_table.size(); parse_node_info_linux(node_info_table, 
_numa_nodes, _sockets, _proc_type_table, _cpu_mapping_table); } + + for (size_t n = 0; n < offline_list.size(); n++) { + _cpu_mapping_table.erase(_cpu_mapping_table.begin() + offline_list[n] - n); + _processors--; + } }; void update_valid_processor_linux(const std::vector phy_core_list, diff --git a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp index 8679090b9ae491..9ea43bd0604296 100644 --- a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp @@ -385,6 +385,188 @@ LinuxCpuMapTestCase cache_1sockets_96cores = { {"0-95"}, }, }; +LinuxCpuMapTestCase cache_2sockets_56cores_hyperthreading = { + 110, + 2, + 2, + 56, + {{110, 56, 0, 54, -1, -1}, {54, 28, 0, 26, 0, 0}, {56, 28, 0, 28, 1, 1}}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {11, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {12, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {13, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {14, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {15, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {16, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {17, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {18, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {19, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {21, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {22, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {23, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {24, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {25, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {26, 0, 0, 24, HYPER_THREADING_PROC, 
24, -1}, {27, 0, 0, 25, HYPER_THREADING_PROC, 25, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 1, 1, 48, HYPER_THREADING_PROC, 48, -1}, {49, 1, 1, 49, HYPER_THREADING_PROC, 49, -1}, + {50, 1, 1, 50, HYPER_THREADING_PROC, 50, -1}, {51, 1, 1, 51, HYPER_THREADING_PROC, 51, -1}, + {52, 1, 1, 52, HYPER_THREADING_PROC, 52, -1}, {53, 1, 1, 53, HYPER_THREADING_PROC, 53, -1}, + {54, 1, 1, 54, HYPER_THREADING_PROC, 54, -1}, {55, 1, 1, 55, HYPER_THREADING_PROC, 55, -1}, + {56, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {57, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {58, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {59, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {60, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {61, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {62, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {63, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {64, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {65, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, + {66, 0, 0, 26, MAIN_CORE_PROC, 26, -1}, {67, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, + {68, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, {69, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, + {70, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, {71, 0, 0, 14, MAIN_CORE_PROC, 
14, -1}, + {72, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, {73, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, + {74, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, {75, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, + {76, 0, 0, 27, MAIN_CORE_PROC, 27, -1}, {77, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, + {78, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {79, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {80, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {81, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {82, 0, 0, 24, MAIN_CORE_PROC, 24, -1}, {83, 0, 0, 25, MAIN_CORE_PROC, 25, -1}, + {84, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {85, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {86, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {87, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {88, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {89, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {90, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {91, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {92, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {93, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, + {94, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {95, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {96, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {97, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {98, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {99, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {100, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {101, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {102, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {103, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + {104, 1, 1, 48, MAIN_CORE_PROC, 48, -1}, {105, 1, 1, 49, MAIN_CORE_PROC, 49, -1}, + {106, 1, 1, 50, MAIN_CORE_PROC, 50, -1}, {107, 1, 1, 51, MAIN_CORE_PROC, 51, -1}, + {108, 1, 1, 52, MAIN_CORE_PROC, 52, -1}, {109, 1, 1, 53, MAIN_CORE_PROC, 53, -1}, + {110, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {111, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + }, + { + {"0,56", "0,56", "0-9,11-19,21-27,56-83"}, + {"1,57", "1,57", "0-9,11-19,21-27,56-83"}, + {"2,58", "2,58", "0-9,11-19,21-27,56-83"}, + {"3,59", "3,59", "0-9,11-19,21-27,56-83"}, + {"4,60", "4,60", "0-9,11-19,21-27,56-83"}, + {"5,61", "5,61", "0-9,11-19,21-27,56-83"}, + {"6,62", "6,62", "0-9,11-19,21-27,56-83"}, + {"7,63", 
"7,63", "0-9,11-19,21-27,56-83"}, + {"8,64", "8,64", "0-9,11-19,21-27,56-83"}, + {"9,65", "9,65", "0-9,11-19,21-27,56-83"}, + {"", "", ""}, + {"11,67", "11,67", "0-9,11-19,21-27,56-83"}, + {"12,68", "12,68", "0-9,11-19,21-27,56-83"}, + {"13,69", "13,69", "0-9,11-19,21-27,56-83"}, + {"14,70", "14,70", "0-9,11-19,21-27,56-83"}, + {"15,71", "15,71", "0-9,11-19,21-27,56-83"}, + {"16,72", "16,72", "0-9,11-19,21-27,56-83"}, + {"17,73", "17,73", "0-9,11-19,21-27,56-83"}, + {"18,74", "18,74", "0-9,11-19,21-27,56-83"}, + {"19,75", "19,75", "0-9,11-19,21-27,56-83"}, + {"", "", ""}, + {"21,77", "21,77", "0-9,11-19,21-27,56-83"}, + {"22,78", "22,78", "0-9,11-19,21-27,56-83"}, + {"23,79", "23,79", "0-9,11-19,21-27,56-83"}, + {"24,80", "24,80", "0-9,11-19,21-27,56-83"}, + {"25,81", "25,81", "0-9,11-19,21-27,56-83"}, + {"26,82", "26,82", "0-9,11-19,21-27,56-83"}, + {"27,83", "27,83", "0-9,11-19,21-27,56-83"}, + {"28,84", "28,84", "28-55,84-111"}, + {"29,85", "29,85", "28-55,84-111"}, + {"30,86", "30,86", "28-55,84-111"}, + {"31,87", "31,87", "28-55,84-111"}, + {"32,88", "32,88", "28-55,84-111"}, + {"33,89", "33,89", "28-55,84-111"}, + {"34,90", "34,90", "28-55,84-111"}, + {"35,91", "35,91", "28-55,84-111"}, + {"36,92", "36,92", "28-55,84-111"}, + {"37,93", "37,93", "28-55,84-111"}, + {"38,94", "38,94", "28-55,84-111"}, + {"39,95", "39,95", "28-55,84-111"}, + {"40,96", "40,96", "28-55,84-111"}, + {"41,97", "41,97", "28-55,84-111"}, + {"42,98", "42,98", "28-55,84-111"}, + {"43,99", "43,99", "28-55,84-111"}, + {"44,100", "44,100", "28-55,84-111"}, + {"45,101", "45,101", "28-55,84-111"}, + {"46,102", "46,102", "28-55,84-111"}, + {"47,103", "47,103", "28-55,84-111"}, + {"48,104", "48,104", "28-55,84-111"}, + {"49,105", "49,105", "28-55,84-111"}, + {"50,106", "50,106", "28-55,84-111"}, + {"51,107", "51,107", "28-55,84-111"}, + {"52,108", "52,108", "28-55,84-111"}, + {"53,109", "53,109", "28-55,84-111"}, + {"54,110", "54,110", "28-55,84-111"}, + {"55,111", "55,111", "28-55,84-111"}, + 
{"0,56", "0,56", "0-9,11-19,21-27,56-83"}, + {"1,57", "1,57", "0-9,11-19,21-27,56-83"}, + {"2,58", "2,58", "0-9,11-19,21-27,56-83"}, + {"3,59", "3,59", "0-9,11-19,21-27,56-83"}, + {"4,60", "4,60", "0-9,11-19,21-27,56-83"}, + {"5,61", "5,61", "0-9,11-19,21-27,56-83"}, + {"6,62", "6,62", "0-9,11-19,21-27,56-83"}, + {"7,63", "7,63", "0-9,11-19,21-27,56-83"}, + {"8,64", "8,64", "0-9,11-19,21-27,56-83"}, + {"9,65", "9,65", "0-9,11-19,21-27,56-83"}, + {"66", "66", "0-9,11-19,21-27,56-83"}, + {"11,67", "11,67", "0-9,11-19,21-27,56-83"}, + {"12,68", "12,68", "0-9,11-19,21-27,56-83"}, + {"13,69", "13,69", "0-9,11-19,21-27,56-83"}, + {"14,70", "14,70", "0-9,11-19,21-27,56-83"}, + {"15,71", "15,71", "0-9,11-19,21-27,56-83"}, + {"16,72", "16,72", "0-9,11-19,21-27,56-83"}, + {"17,73", "17,73", "0-9,11-19,21-27,56-83"}, + {"18,74", "18,74", "0-9,11-19,21-27,56-83"}, + {"19,75", "19,75", "0-9,11-19,21-27,56-83"}, + {"76", "76", "0-9,11-19,21-27,56-83"}, + {"21,77", "21,77", "0-9,11-19,21-27,56-83"}, + {"22,78", "22,78", "0-9,11-19,21-27,56-83"}, + {"23,79", "23,79", "0-9,11-19,21-27,56-83"}, + {"24,80", "24,80", "0-9,11-19,21-27,56-83"}, + {"25,81", "25,81", "0-9,11-19,21-27,56-83"}, + {"26,82", "26,82", "0-9,11-19,21-27,56-83"}, + {"27,83", "27,83", "0-9,11-19,21-27,56-83"}, + {"28,84", "28,84", "28-55,84-111"}, + {"29,85", "29,85", "28-55,84-111"}, + {"30,86", "30,86", "28-55,84-111"}, + {"31,87", "31,87", "28-55,84-111"}, + {"32,88", "32,88", "28-55,84-111"}, + {"33,89", "33,89", "28-55,84-111"}, + {"34,90", "34,90", "28-55,84-111"}, + {"35,91", "35,91", "28-55,84-111"}, + {"36,92", "36,92", "28-55,84-111"}, + {"37,93", "37,93", "28-55,84-111"}, + {"38,94", "38,94", "28-55,84-111"}, + {"39,95", "39,95", "28-55,84-111"}, + {"40,96", "40,96", "28-55,84-111"}, + {"41,97", "41,97", "28-55,84-111"}, + {"42,98", "42,98", "28-55,84-111"}, + {"43,99", "43,99", "28-55,84-111"}, + {"44,100", "44,100", "28-55,84-111"}, + {"45,101", "45,101", "28-55,84-111"}, + {"46,102", "46,102", 
"28-55,84-111"}, + {"47,103", "47,103", "28-55,84-111"}, + {"48,104", "48,104", "28-55,84-111"}, + {"49,105", "49,105", "28-55,84-111"}, + {"50,106", "50,106", "28-55,84-111"}, + {"51,107", "51,107", "28-55,84-111"}, + {"52,108", "52,108", "28-55,84-111"}, + {"53,109", "53,109", "28-55,84-111"}, + {"54,110", "54,110", "28-55,84-111"}, + {"55,111", "55,111", "28-55,84-111"}, + }, + { + {"0-9,11-19,21-27,56-83"}, + {"28-55,84-111"}, + }, +}; LinuxCpuMapTestCase cache_2sockets_48cores_hyperthreading = { 96, 2, @@ -1005,6 +1187,36 @@ LinuxCpuMapTestCase cache_2sockets_20cores_hyperthreading_1 = { }, {}, }; +LinuxCpuMapTestCase cache_1sockets_16cores_hyperthreading = { + 20, + 1, + 1, + 14, + {{20, 6, 8, 6, 0, 0}}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {3, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {5, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 6, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 6, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 6, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 7, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 7, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 7, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 7, -1}, + }, + { + {"0,5", "0,5", "0-19"}, {"1-2", "1-2", "0-19"}, {"1-2", "1-2", "0-19"}, {"3-4", "3-4", "0-19"}, + {"3-4", "3-4", "0-19"}, {"0,5", "0,5", "0-19"}, {"6-7", "6-7", "0-19"}, {"6-7", "6-7", "0-19"}, + {"8-9", "8-9", "0-19"}, {"8-9", "8-9", "0-19"}, {"10-11", "10-11", "0-19"}, {"10-11", "10-11", "0-19"}, + {"12", "12-15", "0-19"}, {"13", "12-15", "0-19"}, {"14", "12-15", "0-19"}, {"15", "12-15", "0-19"}, + {"16", 
"16-19", "0-19"}, {"17", "16-19", "0-19"}, {"18", "16-19", "0-19"}, {"19", "16-19", "0-19"}, + {"20", "20-21", ""}, {"21", "20-21", ""}, + }, + { + {"0-21"}, + }, +}; LinuxCpuMapTestCase cache_1sockets_14cores_hyperthreading = { 20, 1, @@ -1135,6 +1347,36 @@ LinuxCpuMapTestCase cache_1sockets_8cores_hyperthreading = { }, {{"0-11"}}, }; +LinuxCpuMapTestCase cache_1sockets_8cores_hyperthreading_1 = { + 8, + 1, + 1, + 8, + {{8, 4, 4, 0, 0, 0}}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, + {5, 0, 0, 5, EFFICIENT_CORE_PROC, 4, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 4, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 4, -1}, + }, + { + {"0", "0", "0-3"}, + {"1", "1", "0-3"}, + {"2", "2", "0-3"}, + {"3", "3", "0-3"}, + {"4", "4-7", ""}, + {"5", "4-7", ""}, + {"6", "4-7", ""}, + {"7", "4-7", ""}, + }, + { + {"0-7"}, + }, +}; LinuxCpuMapTestCase cache_1sockets_6cores_hyperthreading = { 12, 1, @@ -1220,6 +1462,7 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapCacheParserTests, testing::Values(cache_2sockets_104cores_hyperthreading, cache_1sockets_96cores, + cache_2sockets_56cores_hyperthreading, cache_2sockets_48cores_hyperthreading, cache_2sockets_48cores_hyperthreading_1, cache_2sockets_24cores_hyperthreading, @@ -1229,10 +1472,12 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, cache_2sockets_48cores_2, cache_2sockets_20cores_hyperthreading, cache_2sockets_20cores_hyperthreading_1, + cache_1sockets_16cores_hyperthreading, cache_1sockets_14cores_hyperthreading, cache_1sockets_14cores_hyperthreading_1, cache_1sockets_10cores_hyperthreading, cache_1sockets_8cores_hyperthreading, + cache_1sockets_8cores_hyperthreading_1, cache_1sockets_6cores_hyperthreading, cache_1sockets_4cores, cache_VM_cache_0)); diff --git a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp 
index 04ab617961b953..8ccdfad011d19c 100644 --- a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp @@ -258,6 +258,188 @@ LinuxCpuMapTestCase freq_2sockets_112cores_hyperthreading = { }, // param[in]: The CPU frequency information table of this simulated platform {{"0-55,112-167"}, {"56-111,168-223"}}, // param[in]: The numa node information table of this simulated platform }; +LinuxCpuMapTestCase freq_2sockets_56cores_hyperthreading = { + 110, + 2, + 2, + 56, + {{110, 56, 0, 54, -1, -1}, {54, 28, 0, 26, 0, 0}, {56, 28, 0, 28, 1, 1}}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {11, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {12, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {13, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {14, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {15, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {16, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {17, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {18, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {19, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {21, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {22, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {23, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {24, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {25, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {26, 0, 0, 24, HYPER_THREADING_PROC, 24, -1}, {27, 0, 0, 25, HYPER_THREADING_PROC, 25, -1}, + {28, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {29, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {30, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {31, 1, 1, 29, HYPER_THREADING_PROC, 29, 
-1}, + {32, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {33, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {34, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {35, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {36, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {37, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {38, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {39, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {40, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {41, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {42, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {43, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {44, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {45, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {46, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {47, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {48, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {49, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {50, 1, 1, 48, HYPER_THREADING_PROC, 48, -1}, {51, 1, 1, 49, HYPER_THREADING_PROC, 49, -1}, + {52, 1, 1, 50, HYPER_THREADING_PROC, 50, -1}, {53, 1, 1, 51, HYPER_THREADING_PROC, 51, -1}, + {54, 1, 1, 52, HYPER_THREADING_PROC, 52, -1}, {55, 1, 1, 53, HYPER_THREADING_PROC, 53, -1}, + {56, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {57, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {58, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {59, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {60, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {61, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {62, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {63, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {64, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {65, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, + {66, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {67, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, + {68, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, {69, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, + {70, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, {71, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, + {72, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, {73, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, + {74, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, {75, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, + {76, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, {77, 0, 0, 19, 
MAIN_CORE_PROC, 19, -1}, + {78, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {79, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {80, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {81, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {82, 0, 0, 24, MAIN_CORE_PROC, 24, -1}, {83, 0, 0, 25, MAIN_CORE_PROC, 25, -1}, + {84, 1, 1, 26, MAIN_CORE_PROC, 26, -1}, {85, 1, 1, 27, MAIN_CORE_PROC, 27, -1}, + {86, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {87, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {88, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {89, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {90, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {91, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {92, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {93, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {94, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {95, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, + {96, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {97, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {98, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {99, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {100, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {101, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {102, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {103, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {104, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {105, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + {106, 1, 1, 48, MAIN_CORE_PROC, 48, -1}, {107, 1, 1, 49, MAIN_CORE_PROC, 49, -1}, + {108, 1, 1, 50, MAIN_CORE_PROC, 50, -1}, {109, 1, 1, 51, MAIN_CORE_PROC, 51, -1}, + {110, 1, 1, 52, MAIN_CORE_PROC, 52, -1}, {111, 1, 1, 53, MAIN_CORE_PROC, 53, -1}, + }, + { + {"0,56", "0", "3500000"}, + {"1,57", "0", "3500000"}, + {"2,58", "0", "3500000"}, + {"3,59", "0", "3500000"}, + {"4,60", "0", "3500000"}, + {"5,61", "0", "3500000"}, + {"6,62", "0", "3500000"}, + {"7,63", "0", "3500000"}, + {"8,64", "0", "3500000"}, + {"9,65", "0", "3500000"}, + {"", "", ""}, + {"11,67", "0", "3500000"}, + {"12,68", "0", "3500000"}, + {"13,69", "0", "3500000"}, + {"14,70", "0", "3500000"}, + {"15,71", "0", "3500000"}, + {"16,72", "0", "3500000"}, + {"17,73", "0", "3500000"}, + {"18,74", "0", "3500000"}, + {"19,75", "0", "3500000"}, + 
{"", "", ""}, + {"21,77", "0", "3500000"}, + {"22,78", "0", "3500000"}, + {"23,79", "0", "3500000"}, + {"24,80", "0", "3500000"}, + {"25,81", "0", "3500000"}, + {"26,82", "0", "3500000"}, + {"27,83", "0", "3500000"}, + {"28,84", "1", "3500000"}, + {"29,85", "1", "3500000"}, + {"30,86", "1", "3500000"}, + {"31,87", "1", "3500000"}, + {"32,88", "1", "3500000"}, + {"33,89", "1", "3500000"}, + {"34,90", "1", "3500000"}, + {"35,91", "1", "3500000"}, + {"36,92", "1", "3500000"}, + {"37,93", "1", "3500000"}, + {"38,94", "1", "3500000"}, + {"39,95", "1", "3500000"}, + {"40,96", "1", "3500000"}, + {"41,97", "1", "3500000"}, + {"42,98", "1", "3500000"}, + {"43,99", "1", "3500000"}, + {"44,100", "1", "3500000"}, + {"45,101", "1", "3500000"}, + {"46,102", "1", "3500000"}, + {"47,103", "1", "3500000"}, + {"48,104", "1", "3500000"}, + {"49,105", "1", "3500000"}, + {"50,106", "1", "3500000"}, + {"51,107", "1", "3500000"}, + {"52,108", "1", "3500000"}, + {"53,109", "1", "3500000"}, + {"54,110", "1", "3500000"}, + {"55,111", "1", "3500000"}, + {"0,56", "0", "3500000"}, + {"1,57", "0", "3500000"}, + {"2,58", "0", "3500000"}, + {"3,59", "0", "3500000"}, + {"4,60", "0", "3500000"}, + {"5,61", "0", "3500000"}, + {"6,62", "0", "3500000"}, + {"7,63", "0", "3500000"}, + {"8,64", "0", "3500000"}, + {"9,65", "0", "3500000"}, + {"66", "0", "3500000"}, + {"11,67", "0", "3500000"}, + {"12,68", "0", "3500000"}, + {"13,69", "0", "3500000"}, + {"14,70", "0", "3500000"}, + {"15,71", "0", "3500000"}, + {"16,72", "0", "3500000"}, + {"17,73", "0", "3500000"}, + {"18,74", "0", "3500000"}, + {"19,75", "0", "3500000"}, + {"76", "0", "3500000"}, + {"21,77", "0", "3500000"}, + {"22,78", "0", "3500000"}, + {"23,79", "0", "3500000"}, + {"24,80", "0", "3500000"}, + {"25,81", "0", "3500000"}, + {"26,82", "0", "3500000"}, + {"27,83", "0", "3500000"}, + {"28,84", "1", "3500000"}, + {"29,85", "1", "3500000"}, + {"30,86", "1", "3500000"}, + {"31,87", "1", "3500000"}, + {"32,88", "1", "3500000"}, + {"33,89", "1", 
"3500000"}, + {"34,90", "1", "3500000"}, + {"35,91", "1", "3500000"}, + {"36,92", "1", "3500000"}, + {"37,93", "1", "3500000"}, + {"38,94", "1", "3500000"}, + {"39,95", "1", "3500000"}, + {"40,96", "1", "3500000"}, + {"41,97", "1", "3500000"}, + {"42,98", "1", "3500000"}, + {"43,99", "1", "3500000"}, + {"44,100", "1", "3500000"}, + {"45,101", "1", "3500000"}, + {"46,102", "1", "3500000"}, + {"47,103", "1", "3500000"}, + {"48,104", "1", "3500000"}, + {"49,105", "1", "3500000"}, + {"50,106", "1", "3500000"}, + {"51,107", "1", "3500000"}, + {"52,108", "1", "3500000"}, + {"53,109", "1", "3500000"}, + {"54,110", "1", "3500000"}, + {"55,111", "1", "3500000"}, + }, + { + {"0-9,11-19,21-27,56-83"}, + {"28-55,84-111"}, + }, +}; LinuxCpuMapTestCase freq_2sockets_48cores_hyperthreading = { 96, 2, @@ -987,6 +1169,7 @@ TEST_P(LinuxCpuMapFreqParserTests, LinuxFreq) {} INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapFreqParserTests, testing::Values(freq_2sockets_112cores_hyperthreading, + freq_2sockets_56cores_hyperthreading, freq_2sockets_48cores_hyperthreading, freq_2sockets_48cores_hyperthreading_1, freq_2sockets_24cores_hyperthreading, From 7232d242d71d2e20b148df1f3042c9d550d8e16a Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Wed, 18 Dec 2024 06:52:23 +0100 Subject: [PATCH 04/60] [CPU][ARM] PagedAttention fixes (#28017) ### Details: - Fix: do not infer non-executable node if it's in constant path - Fix: redefine the 2nd output memory of `PagedAttention` node if `m_hasScore` is `false` - These 2 fixes required to support fp16 PagedAttention: https://github.com/openvinotoolkit/openvino/pull/27841 ### Tickets: - *ticket-id* --- src/plugins/intel_cpu/src/graph.cpp | 2 +- src/plugins/intel_cpu/src/nodes/paged_attn.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index fd6721ce4c83ad..aab78a4d5f15bd 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ 
b/src/plugins/intel_cpu/src/graph.cpp @@ -552,7 +552,7 @@ void Graph::CreatePrimitivesAndExecConstants() const { node->createPrimitive(); } - if (!node->isConstant()) { + if (!node->isConstant() || !node->isExecutable()) { continue; } diff --git a/src/plugins/intel_cpu/src/nodes/paged_attn.cpp b/src/plugins/intel_cpu/src/nodes/paged_attn.cpp index 19acdea4942eed..b51b2b3d8029a9 100644 --- a/src/plugins/intel_cpu/src/nodes/paged_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/paged_attn.cpp @@ -189,7 +189,7 @@ void PagedAttention::execute(dnnl::stream strm) { VectorDims scoreDims{len}; redefineOutputMemory({outDims, scoreDims}); } else { - redefineOutputMemory(0, outDims); + redefineOutputMemory({outDims, {0}}); } outputs[0] = getDstMemoryAtPort(0); From cd7cb741eddf96e554cbe50e81925c1baac4f329 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Wed, 18 Dec 2024 08:23:22 +0100 Subject: [PATCH 05/60] [DOCS] Updating Selector Tool (#28110) Porting: #28109 Signed-off-by: sgolebiewski-intel --- .../get-started/install-openvino.rst | 2 +- .../selector-tool/assets/selector-DiE3WrtX.js | 59 ++++++++++++++++++ ...tor-BC2lpCQ9.css => selector-DwLwwkWa.css} | 0 .../selector-tool/assets/selector-ww24l5P1.js | 61 ------------------- ...tor-2a63478.html => selector-15432eb.html} | 6 +- 5 files changed, 63 insertions(+), 65 deletions(-) create mode 100644 docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js rename docs/sphinx_setup/_static/selector-tool/assets/{selector-BC2lpCQ9.css => selector-DwLwwkWa.css} (100%) delete mode 100644 docs/sphinx_setup/_static/selector-tool/assets/selector-ww24l5P1.js rename docs/sphinx_setup/_static/selector-tool/{selector-2a63478.html => selector-15432eb.html} (66%) diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index ef9eca39aae388..29547d5b0fc2e5 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ 
b/docs/articles_en/get-started/install-openvino.rst @@ -21,7 +21,7 @@ Install OpenVINO™ 2024.6 - + OpenVINO 2024.6, described here, is not a Long-Term-Support version! All currently supported versions are: diff --git a/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js b/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js new file mode 100644 index 00000000000000..264f23f1dd17e3 --- /dev/null +++ b/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js @@ -0,0 +1,59 @@ +var Vd=Object.defineProperty;var bd=(e,t,n)=>t in e?Vd(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var ze=(e,t,n)=>bd(e,typeof t!="symbol"?t+"":t,n);function qu(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var ec={exports:{}},lo={},tc={exports:{}},D={};/** + * @license React + * react.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */var Hr=Symbol.for("react.element"),$d=Symbol.for("react.portal"),Md=Symbol.for("react.fragment"),Bd=Symbol.for("react.strict_mode"),Kd=Symbol.for("react.profiler"),Hd=Symbol.for("react.provider"),Gd=Symbol.for("react.context"),Wd=Symbol.for("react.forward_ref"),Yd=Symbol.for("react.suspense"),Qd=Symbol.for("react.memo"),Jd=Symbol.for("react.lazy"),da=Symbol.iterator;function Xd(e){return e===null||typeof e!="object"?null:(e=da&&e[da]||e["@@iterator"],typeof e=="function"?e:null)}var nc={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},rc=Object.assign,ic={};function qn(e,t,n){this.props=e,this.context=t,this.refs=ic,this.updater=n||nc}qn.prototype.isReactComponent={};qn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};qn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function oc(){}oc.prototype=qn.prototype;function ll(e,t,n){this.props=e,this.context=t,this.refs=ic,this.updater=n||nc}var al=ll.prototype=new oc;al.constructor=ll;rc(al,qn.prototype);al.isPureReactComponent=!0;var pa=Array.isArray,sc=Object.prototype.hasOwnProperty,ul={current:null},lc={key:!0,ref:!0,__self:!0,__source:!0};function ac(e,t,n){var r,i={},o=null,s=null;if(t!=null)for(r in t.ref!==void 0&&(s=t.ref),t.key!==void 0&&(o=""+t.key),t)sc.call(t,r)&&!lc.hasOwnProperty(r)&&(i[r]=t[r]);var l=arguments.length-2;if(l===1)i.children=n;else if(1{const e={type:"size",height:document.body.offsetHeight};window.parent.postMessage(e)};new ResizeObserver(up).observe(document.body);function ue(e){"@babel/helpers - typeof";return ue=typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?function(t){return typeof t}:function(t){return t&&typeof 
Symbol=="function"&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},ue(e)}function ct(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function cp(e,t){if(ue(e)!=="object"||e===null)return e;var n=e[Symbol.toPrimitive];if(n!==void 0){var r=n.call(e,t||"default");if(ue(r)!=="object")return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return(t==="string"?String:Number)(e)}function cc(e){var t=cp(e,"string");return ue(t)==="symbol"?t:String(t)}function ga(e,t){for(var n=0;ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n1&&arguments[1]!==void 0?arguments[1]:{};ct(this,e),this.init(t,n)}return ft(e,[{key:"init",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};this.prefix=r.prefix||"i18next:",this.logger=n||pp,this.options=r,this.debug=r.debug}},{key:"setDebug",value:function(n){this.debug=n}},{key:"log",value:function(){for(var n=arguments.length,r=new Array(n),i=0;i1?r-1:0),o=1;o-1?l.replace(/###/g,"."):l}function i(){return!e||typeof e=="string"}for(var o=typeof t!="string"?[].concat(t):t.split(".");o.length>1;){if(i())return{};var s=r(o.shift());!e[s]&&n&&(e[s]=new n),Object.prototype.hasOwnProperty.call(e,s)?e=e[s]:e={}}return i()?{}:{obj:e,k:r(o.shift())}}function ka(e,t,n){var r=fl(e,t,Object),i=r.obj,o=r.k;i[o]=n}function mp(e,t,n,r){var i=fl(e,t,Object),o=i.obj,s=i.k;o[s]=o[s]||[],o[s].push(n)}function Ai(e,t){var n=fl(e,t),r=n.obj,i=n.k;if(r)return r[i]}function Sa(e,t,n){var r=Ai(e,n);return r!==void 0?r:Ai(t,n)}function hc(e,t,n){for(var r in t)r!=="__proto__"&&r!=="constructor"&&(r in e?typeof e[r]=="string"||e[r]instanceof String||typeof t[r]=="string"||t[r]instanceof String?n&&(e[r]=t[r]):hc(e[r],t[r],n):e[r]=t[r]);return e}function On(e){return e.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g,"\\$&")}var vp={"&":"&","<":"<",">":">",'"':""","'":"'","/":"/"};function yp(e){return typeof e=="string"?e.replace(/[&<>"'\/]/g,function(t){return 
vp[t]}):e}var uo=typeof window<"u"&&window.navigator&&typeof window.navigator.userAgentData>"u"&&window.navigator.userAgent&&window.navigator.userAgent.indexOf("MSIE")>-1,wp=[" ",",","?","!",";"];function kp(e,t,n){t=t||"",n=n||"";var r=wp.filter(function(l){return t.indexOf(l)<0&&n.indexOf(l)<0});if(r.length===0)return!0;var i=new RegExp("(".concat(r.map(function(l){return l==="?"?"\\?":l}).join("|"),")")),o=!i.test(e);if(!o){var s=e.indexOf(n);s>0&&!i.test(e.substring(0,s))&&(o=!0)}return o}function xa(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(i){return Object.getOwnPropertyDescriptor(e,i).enumerable})),n.push.apply(n,r)}return n}function ni(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}function gc(e,t){var n=arguments.length>2&&arguments[2]!==void 0?arguments[2]:".";if(e){if(e[t])return e[t];for(var r=t.split(n),i=e,o=0;oo+s;)s++,l=r.slice(o,o+s).join(n),a=i[l];if(a===void 0)return;if(a===null)return null;if(t.endsWith(l)){if(typeof a=="string")return a;if(l&&typeof a[l]=="string")return a[l]}var c=r.slice(o+s).join(n);return c?gc(a,c,n):void 0}i=i[r[o]]}return i}}var Op=function(e){ao(n,e);var t=Sp(n);function n(r){var i,o=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{ns:["translation"],defaultNS:"translation"};return ct(this,n),i=t.call(this),uo&&Xt.call(Mt(i)),i.data=r||{},i.options=o,i.options.keySeparator===void 0&&(i.options.keySeparator="."),i.options.ignoreJSONStructure===void 0&&(i.options.ignoreJSONStructure=!0),i}return ft(n,[{key:"addNamespaces",value:function(i){this.options.ns.indexOf(i)<0&&this.options.ns.push(i)}},{key:"removeNamespaces",value:function(i){var o=this.options.ns.indexOf(i);o>-1&&this.options.ns.splice(o,1)}},{key:"getResource",value:function(i,o,s){var 
l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{},a=l.keySeparator!==void 0?l.keySeparator:this.options.keySeparator,c=l.ignoreJSONStructure!==void 0?l.ignoreJSONStructure:this.options.ignoreJSONStructure,p=[i,o];s&&typeof s!="string"&&(p=p.concat(s)),s&&typeof s=="string"&&(p=p.concat(a?s.split(a):s)),i.indexOf(".")>-1&&(p=i.split("."));var d=Ai(this.data,p);return d||!c||typeof s!="string"?d:gc(this.data&&this.data[i]&&this.data[i][o],s,a)}},{key:"addResource",value:function(i,o,s,l){var a=arguments.length>4&&arguments[4]!==void 0?arguments[4]:{silent:!1},c=this.options.keySeparator;c===void 0&&(c=".");var p=[i,o];s&&(p=p.concat(c?s.split(c):s)),i.indexOf(".")>-1&&(p=i.split("."),l=o,o=p[1]),this.addNamespaces(o),ka(this.data,p,l),a.silent||this.emit("added",i,o,s,l)}},{key:"addResources",value:function(i,o,s){var l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{silent:!1};for(var a in s)(typeof s[a]=="string"||Object.prototype.toString.apply(s[a])==="[object Array]")&&this.addResource(i,o,a,s[a],{silent:!0});l.silent||this.emit("added",i,o,s)}},{key:"addResourceBundle",value:function(i,o,s,l,a){var c=arguments.length>5&&arguments[5]!==void 0?arguments[5]:{silent:!1},p=[i,o];i.indexOf(".")>-1&&(p=i.split("."),l=s,s=o,o=p[1]),this.addNamespaces(o);var d=Ai(this.data,p)||{};l?hc(d,s,a):d=ni(ni({},d),s),ka(this.data,p,d),c.silent||this.emit("added",i,o,s)}},{key:"removeResourceBundle",value:function(i,o){this.hasResourceBundle(i,o)&&delete this.data[i][o],this.removeNamespaces(o),this.emit("removed",i,o)}},{key:"hasResourceBundle",value:function(i,o){return this.getResource(i,o)!==void 0}},{key:"getResourceBundle",value:function(i,o){return o||(o=this.options.defaultNS),this.options.compatibilityAPI==="v1"?ni(ni({},{}),this.getResource(i,o)):this.getResource(i,o)}},{key:"getDataByLanguage",value:function(i){return this.data[i]}},{key:"hasLanguageSomeTranslations",value:function(i){var 
o=this.getDataByLanguage(i),s=o&&Object.keys(o)||[];return!!s.find(function(l){return o[l]&&Object.keys(o[l]).length>0})}},{key:"toJSON",value:function(){return this.data}}]),n}(Xt),mc={processors:{},addPostProcessor:function(t){this.processors[t.name]=t},handle:function(t,n,r,i,o){var s=this;return t.forEach(function(l){s.processors[l]&&(n=s.processors[l].process(n,r,i,o))}),n}};function Oa(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(i){return Object.getOwnPropertyDescriptor(e,i).enumerable})),n.push.apply(n,r)}return n}function ve(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}var Pa={},Na=function(e){ao(n,e);var t=Pp(n);function n(r){var i,o=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};return ct(this,n),i=t.call(this),uo&&Xt.call(Mt(i)),gp(["resourceStore","languageUtils","pluralResolver","interpolator","backendConnector","i18nFormat","utils"],r,Mt(i)),i.options=o,i.options.keySeparator===void 0&&(i.options.keySeparator="."),i.logger=vt.create("translator"),i}return ft(n,[{key:"changeLanguage",value:function(i){i&&(this.language=i)}},{key:"exists",value:function(i){var o=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{interpolation:{}};if(i==null)return!1;var s=this.resolve(i,o);return s&&s.res!==void 0}},{key:"extractFromKey",value:function(i,o){var s=o.nsSeparator!==void 0?o.nsSeparator:this.options.nsSeparator;s===void 0&&(s=":");var l=o.keySeparator!==void 0?o.keySeparator:this.options.keySeparator,a=o.ns||this.options.defaultNS||[],c=s&&i.indexOf(s)>-1,p=!this.options.userDefinedKeySeparator&&!o.keySeparator&&!this.options.userDefinedNsSeparator&&!o.nsSeparator&&!kp(i,s,l);if(c&&!p){var d=i.match(this.interpolator.nestingRegexp);if(d&&d.length>0)return{key:i,namespaces:a};var 
h=i.split(s);(s!==l||s===l&&this.options.ns.indexOf(h[0])>-1)&&(a=h.shift()),i=h.join(l)}return typeof a=="string"&&(a=[a]),{key:i,namespaces:a}}},{key:"translate",value:function(i,o,s){var l=this;if(ue(o)!=="object"&&this.options.overloadTranslationOptionHandler&&(o=this.options.overloadTranslationOptionHandler(arguments)),o||(o={}),i==null)return"";Array.isArray(i)||(i=[String(i)]);var a=o.returnDetails!==void 0?o.returnDetails:this.options.returnDetails,c=o.keySeparator!==void 0?o.keySeparator:this.options.keySeparator,p=this.extractFromKey(i[i.length-1],o),d=p.key,h=p.namespaces,v=h[h.length-1],y=o.lng||this.language,k=o.appendNamespaceToCIMode||this.options.appendNamespaceToCIMode;if(y&&y.toLowerCase()==="cimode"){if(k){var O=o.nsSeparator||this.options.nsSeparator;return a?{res:"".concat(v).concat(O).concat(d),usedKey:d,exactUsedKey:d,usedLng:y,usedNS:v}:"".concat(v).concat(O).concat(d)}return a?{res:d,usedKey:d,exactUsedKey:d,usedLng:y,usedNS:v}:d}var f=this.resolve(i,o),u=f&&f.res,g=f&&f.usedKey||d,w=f&&f.exactUsedKey||d,x=Object.prototype.toString.apply(u),S=["[object Number]","[object Function]","[object RegExp]"],N=o.joinArrays!==void 0?o.joinArrays:this.options.joinArrays,_=!this.i18nFormat||this.i18nFormat.handleAsObject,L=typeof u!="string"&&typeof u!="boolean"&&typeof u!="number";if(_&&u&&L&&S.indexOf(x)<0&&!(typeof N=="string"&&x==="[object Array]")){if(!o.returnObjects&&!this.options.returnObjects){this.options.returnedObjectHandler||this.logger.warn("accessing an object - but returnObjects options is not enabled!");var E=this.options.returnedObjectHandler?this.options.returnedObjectHandler(g,u,ve(ve({},o),{},{ns:h})):"key '".concat(d," (").concat(this.language,")' returned an object instead of string.");return a?(f.res=E,f):E}if(c){var K=x==="[object Array]",Ce=K?[]:{},St=K?w:g;for(var et in u)if(Object.prototype.hasOwnProperty.call(u,et)){var 
kn="".concat(St).concat(c).concat(et);Ce[et]=this.translate(kn,ve(ve({},o),{joinArrays:!1,ns:h})),Ce[et]===kn&&(Ce[et]=u[et])}u=Ce}}else if(_&&typeof N=="string"&&x==="[object Array]")u=u.join(N),u&&(u=this.extendTranslation(u,i,o,s));else{var dt=!1,tt=!1,C=o.count!==void 0&&typeof o.count!="string",I=n.hasDefaultValue(o),R=C?this.pluralResolver.getSuffix(y,o.count,o):"",V=o["defaultValue".concat(R)]||o.defaultValue;!this.isValidLookup(u)&&I&&(dt=!0,u=V),this.isValidLookup(u)||(tt=!0,u=d);var Y=o.missingKeyNoValueFallbackToKey||this.options.missingKeyNoValueFallbackToKey,xt=Y&&tt?void 0:u,Ue=I&&V!==u&&this.options.updateMissing;if(tt||dt||Ue){if(this.logger.log(Ue?"updateKey":"missingKey",y,v,d,Ue?V:u),c){var Sn=this.resolve(d,ve(ve({},o),{},{keySeparator:!1}));Sn&&Sn.res&&this.logger.warn("Seems the loaded translations were in flat JSON format instead of nested. Either set keySeparator: false on init or make sure your translations are published in nested format.")}var Fe=[],Ot=this.languageUtils.getFallbackCodes(this.options.fallbackLng,o.lng||this.language);if(this.options.saveMissingTo==="fallback"&&Ot&&Ot[0])for(var _o=0;_o1&&arguments[1]!==void 0?arguments[1]:{},l,a,c,p,d;return typeof i=="string"&&(i=[i]),i.forEach(function(h){if(!o.isValidLookup(l)){var v=o.extractFromKey(h,s),y=v.key;a=y;var k=v.namespaces;o.options.fallbackNS&&(k=k.concat(o.options.fallbackNS));var O=s.count!==void 0&&typeof s.count!="string",f=O&&!s.ordinal&&s.count===0&&o.pluralResolver.shouldUseIntlApi(),u=s.context!==void 0&&(typeof s.context=="string"||typeof s.context=="number")&&s.context!=="",g=s.lngs?s.lngs:o.languageUtils.toResolveHierarchy(s.lng||o.language,s.fallbackLng);k.forEach(function(w){o.isValidLookup(l)||(d=w,!Pa["".concat(g[0],"-").concat(w)]&&o.utils&&o.utils.hasLoadedNamespace&&!o.utils.hasLoadedNamespace(d)&&(Pa["".concat(g[0],"-").concat(w)]=!0,o.logger.warn('key "'.concat(a,'" for languages "').concat(g.join(", "),`" won't get resolved as namespace "`).concat(d,'" 
was not yet loaded'),"This means something IS WRONG in your setup. You access the t function before i18next.init / i18next.loadNamespace / i18next.changeLanguage was done. Wait for the callback or Promise to resolve before accessing it!!!")),g.forEach(function(x){if(!o.isValidLookup(l)){p=x;var S=[y];if(o.i18nFormat&&o.i18nFormat.addLookupKeys)o.i18nFormat.addLookupKeys(S,y,x,w,s);else{var N;O&&(N=o.pluralResolver.getSuffix(x,s.count,s));var _="".concat(o.options.pluralSeparator,"zero");if(O&&(S.push(y+N),f&&S.push(y+_)),u){var L="".concat(y).concat(o.options.contextSeparator).concat(s.context);S.push(L),O&&(S.push(L+N),f&&S.push(L+_))}}for(var E;E=S.pop();)o.isValidLookup(l)||(c=E,l=o.getResource(x,w,E,s))}}))})}}),{res:l,usedKey:a,exactUsedKey:c,usedLng:p,usedNS:d}}},{key:"isValidLookup",value:function(i){return i!==void 0&&!(!this.options.returnNull&&i===null)&&!(!this.options.returnEmptyString&&i==="")}},{key:"getResource",value:function(i,o,s){var l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{};return this.i18nFormat&&this.i18nFormat.getResource?this.i18nFormat.getResource(i,o,s,l):this.resourceStore.getResource(i,o,s,l)}}],[{key:"hasDefaultValue",value:function(i){var o="defaultValue";for(var s in i)if(Object.prototype.hasOwnProperty.call(i,s)&&o===s.substring(0,o.length)&&i[s]!==void 0)return!0;return!1}}]),n}(Xt);function Io(e){return e.charAt(0).toUpperCase()+e.slice(1)}var _a=function(){function e(t){ct(this,e),this.options=t,this.supportedLngs=this.options.supportedLngs||!1,this.logger=vt.create("languageUtils")}return ft(e,[{key:"getScriptPartFromCode",value:function(n){if(!n||n.indexOf("-")<0)return null;var r=n.split("-");return r.length===2||(r.pop(),r[r.length-1].toLowerCase()==="x")?null:this.formatLanguageCode(r.join("-"))}},{key:"getLanguagePartFromCode",value:function(n){if(!n||n.indexOf("-")<0)return n;var r=n.split("-");return this.formatLanguageCode(r[0])}},{key:"formatLanguageCode",value:function(n){if(typeof 
n=="string"&&n.indexOf("-")>-1){var r=["hans","hant","latn","cyrl","cans","mong","arab"],i=n.split("-");return this.options.lowerCaseLng?i=i.map(function(o){return o.toLowerCase()}):i.length===2?(i[0]=i[0].toLowerCase(),i[1]=i[1].toUpperCase(),r.indexOf(i[1].toLowerCase())>-1&&(i[1]=Io(i[1].toLowerCase()))):i.length===3&&(i[0]=i[0].toLowerCase(),i[1].length===2&&(i[1]=i[1].toUpperCase()),i[0]!=="sgn"&&i[2].length===2&&(i[2]=i[2].toUpperCase()),r.indexOf(i[1].toLowerCase())>-1&&(i[1]=Io(i[1].toLowerCase())),r.indexOf(i[2].toLowerCase())>-1&&(i[2]=Io(i[2].toLowerCase()))),i.join("-")}return this.options.cleanCode||this.options.lowerCaseLng?n.toLowerCase():n}},{key:"isSupportedCode",value:function(n){return(this.options.load==="languageOnly"||this.options.nonExplicitSupportedLngs)&&(n=this.getLanguagePartFromCode(n)),!this.supportedLngs||!this.supportedLngs.length||this.supportedLngs.indexOf(n)>-1}},{key:"getBestMatchFromCodes",value:function(n){var r=this;if(!n)return null;var i;return n.forEach(function(o){if(!i){var s=r.formatLanguageCode(o);(!r.options.supportedLngs||r.isSupportedCode(s))&&(i=s)}}),!i&&this.options.supportedLngs&&n.forEach(function(o){if(!i){var s=r.getLanguagePartFromCode(o);if(r.isSupportedCode(s))return i=s;i=r.options.supportedLngs.find(function(l){if(l.indexOf(s)===0)return l})}}),i||(i=this.getFallbackCodes(this.options.fallbackLng)[0]),i}},{key:"getFallbackCodes",value:function(n,r){if(!n)return[];if(typeof n=="function"&&(n=n(r)),typeof n=="string"&&(n=[n]),Object.prototype.toString.apply(n)==="[object Array]")return n;if(!r)return n.default||[];var i=n[r];return i||(i=n[this.getScriptPartFromCode(r)]),i||(i=n[this.formatLanguageCode(r)]),i||(i=n[this.getLanguagePartFromCode(r)]),i||(i=n.default),i||[]}},{key:"toResolveHierarchy",value:function(n,r){var i=this,o=this.getFallbackCodes(r||this.options.fallbackLng||[],n),s=[],l=function(c){c&&(i.isSupportedCode(c)?s.push(c):i.logger.warn("rejecting language code not found in supportedLngs: 
".concat(c)))};return typeof n=="string"&&n.indexOf("-")>-1?(this.options.load!=="languageOnly"&&l(this.formatLanguageCode(n)),this.options.load!=="languageOnly"&&this.options.load!=="currentOnly"&&l(this.getScriptPartFromCode(n)),this.options.load!=="currentOnly"&&l(this.getLanguagePartFromCode(n))):typeof n=="string"&&l(this.formatLanguageCode(n)),o.forEach(function(a){s.indexOf(a)<0&&l(i.formatLanguageCode(a))}),s}}]),e}(),_p=[{lngs:["ach","ak","am","arn","br","fil","gun","ln","mfe","mg","mi","oc","pt","pt-BR","tg","tl","ti","tr","uz","wa"],nr:[1,2],fc:1},{lngs:["af","an","ast","az","bg","bn","ca","da","de","dev","el","en","eo","es","et","eu","fi","fo","fur","fy","gl","gu","ha","hi","hu","hy","ia","it","kk","kn","ku","lb","mai","ml","mn","mr","nah","nap","nb","ne","nl","nn","no","nso","pa","pap","pms","ps","pt-PT","rm","sco","se","si","so","son","sq","sv","sw","ta","te","tk","ur","yo"],nr:[1,2],fc:2},{lngs:["ay","bo","cgg","fa","ht","id","ja","jbo","ka","km","ko","ky","lo","ms","sah","su","th","tt","ug","vi","wo","zh"],nr:[1],fc:3},{lngs:["be","bs","cnr","dz","hr","ru","sr","uk"],nr:[1,2,5],fc:4},{lngs:["ar"],nr:[0,1,2,3,11,100],fc:5},{lngs:["cs","sk"],nr:[1,2,5],fc:6},{lngs:["csb","pl"],nr:[1,2,5],fc:7},{lngs:["cy"],nr:[1,2,3,8],fc:8},{lngs:["fr"],nr:[1,2],fc:9},{lngs:["ga"],nr:[1,2,3,7,11],fc:10},{lngs:["gd"],nr:[1,2,3,20],fc:11},{lngs:["is"],nr:[1,2],fc:12},{lngs:["jv"],nr:[0,1],fc:13},{lngs:["kw"],nr:[1,2,3,4],fc:14},{lngs:["lt"],nr:[1,2,10],fc:15},{lngs:["lv"],nr:[1,2,0],fc:16},{lngs:["mk"],nr:[1,2],fc:17},{lngs:["mnk"],nr:[0,1,2],fc:18},{lngs:["mt"],nr:[1,2,11,20],fc:19},{lngs:["or"],nr:[2,1],fc:2},{lngs:["ro"],nr:[1,2,20],fc:20},{lngs:["sl"],nr:[5,1,2,3],fc:21},{lngs:["he","iw"],nr:[1,2,20,21],fc:22}],Ep={1:function(t){return+(t>1)},2:function(t){return+(t!=1)},3:function(t){return 0},4:function(t){return t%10==1&&t%100!=11?0:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?1:2},5:function(t){return 
t==0?0:t==1?1:t==2?2:t%100>=3&&t%100<=10?3:t%100>=11?4:5},6:function(t){return t==1?0:t>=2&&t<=4?1:2},7:function(t){return t==1?0:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?1:2},8:function(t){return t==1?0:t==2?1:t!=8&&t!=11?2:3},9:function(t){return+(t>=2)},10:function(t){return t==1?0:t==2?1:t<7?2:t<11?3:4},11:function(t){return t==1||t==11?0:t==2||t==12?1:t>2&&t<20?2:3},12:function(t){return+(t%10!=1||t%100==11)},13:function(t){return+(t!==0)},14:function(t){return t==1?0:t==2?1:t==3?2:3},15:function(t){return t%10==1&&t%100!=11?0:t%10>=2&&(t%100<10||t%100>=20)?1:2},16:function(t){return t%10==1&&t%100!=11?0:t!==0?1:2},17:function(t){return t==1||t%10==1&&t%100!=11?0:1},18:function(t){return t==0?0:t==1?1:2},19:function(t){return t==1?0:t==0||t%100>1&&t%100<11?1:t%100>10&&t%100<20?2:3},20:function(t){return t==1?0:t==0||t%100>0&&t%100<20?1:2},21:function(t){return t%100==1?1:t%100==2?2:t%100==3||t%100==4?3:0},22:function(t){return t==1?0:t==2?1:(t<0||t>10)&&t%10==0?2:3}},Cp=["v1","v2","v3"],Ea={zero:0,one:1,two:2,few:3,many:4,other:5};function jp(){var e={};return _p.forEach(function(t){t.lngs.forEach(function(n){e[n]={numbers:t.nr,plurals:Ep[t.fc]}})}),e}var Ip=function(){function e(t){var n=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};ct(this,e),this.languageUtils=t,this.options=n,this.logger=vt.create("pluralResolver"),(!this.options.compatibilityJSON||this.options.compatibilityJSON==="v4")&&(typeof Intl>"u"||!Intl.PluralRules)&&(this.options.compatibilityJSON="v3",this.logger.error("Your environment seems not to be Intl API compatible, use an Intl.PluralRules polyfill. 
Will fallback to the compatibilityJSON v3 format handling.")),this.rules=jp()}return ft(e,[{key:"addRule",value:function(n,r){this.rules[n]=r}},{key:"getRule",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};if(this.shouldUseIntlApi())try{return new Intl.PluralRules(n,{type:r.ordinal?"ordinal":"cardinal"})}catch{return}return this.rules[n]||this.rules[this.languageUtils.getLanguagePartFromCode(n)]}},{key:"needsPlural",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},i=this.getRule(n,r);return this.shouldUseIntlApi()?i&&i.resolvedOptions().pluralCategories.length>1:i&&i.numbers.length>1}},{key:"getPluralFormsOfKey",value:function(n,r){var i=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{};return this.getSuffixes(n,i).map(function(o){return"".concat(r).concat(o)})}},{key:"getSuffixes",value:function(n){var r=this,i=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},o=this.getRule(n,i);return o?this.shouldUseIntlApi()?o.resolvedOptions().pluralCategories.sort(function(s,l){return Ea[s]-Ea[l]}).map(function(s){return"".concat(r.options.prepend).concat(s)}):o.numbers.map(function(s){return r.getSuffix(n,s,i)}):[]}},{key:"getSuffix",value:function(n,r){var i=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{},o=this.getRule(n,i);return o?this.shouldUseIntlApi()?"".concat(this.options.prepend).concat(o.select(r)):this.getSuffixRetroCompatible(o,r):(this.logger.warn("no plural rule found for: ".concat(n)),"")}},{key:"getSuffixRetroCompatible",value:function(n,r){var i=this,o=n.noAbs?n.plurals(r):n.plurals(Math.abs(r)),s=n.numbers[o];this.options.simplifyPluralSuffix&&n.numbers.length===2&&n.numbers[0]===1&&(s===2?s="plural":s===1&&(s=""));var l=function(){return i.options.prepend&&s.toString()?i.options.prepend+s.toString():s.toString()};return this.options.compatibilityJSON==="v1"?s===1?"":typeof 
s=="number"?"_plural_".concat(s.toString()):l():this.options.compatibilityJSON==="v2"||this.options.simplifyPluralSuffix&&n.numbers.length===2&&n.numbers[0]===1?l():this.options.prepend&&o.toString()?this.options.prepend+o.toString():o.toString()}},{key:"shouldUseIntlApi",value:function(){return!Cp.includes(this.options.compatibilityJSON)}}]),e}();function Ca(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(i){return Object.getOwnPropertyDescriptor(e,i).enumerable})),n.push.apply(n,r)}return n}function nt(e){for(var t=1;t0&&arguments[0]!==void 0?arguments[0]:{};ct(this,e),this.logger=vt.create("interpolator"),this.options=t,this.format=t.interpolation&&t.interpolation.format||function(n){return n},this.init(t)}return ft(e,[{key:"init",value:function(){var n=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};n.interpolation||(n.interpolation={escapeValue:!0});var r=n.interpolation;this.escape=r.escape!==void 0?r.escape:yp,this.escapeValue=r.escapeValue!==void 0?r.escapeValue:!0,this.useRawValueToEscape=r.useRawValueToEscape!==void 0?r.useRawValueToEscape:!1,this.prefix=r.prefix?On(r.prefix):r.prefixEscaped||"{{",this.suffix=r.suffix?On(r.suffix):r.suffixEscaped||"}}",this.formatSeparator=r.formatSeparator?r.formatSeparator:r.formatSeparator||",",this.unescapePrefix=r.unescapeSuffix?"":r.unescapePrefix||"-",this.unescapeSuffix=this.unescapePrefix?"":r.unescapeSuffix||"",this.nestingPrefix=r.nestingPrefix?On(r.nestingPrefix):r.nestingPrefixEscaped||On("$t("),this.nestingSuffix=r.nestingSuffix?On(r.nestingSuffix):r.nestingSuffixEscaped||On(")"),this.nestingOptionsSeparator=r.nestingOptionsSeparator?r.nestingOptionsSeparator:r.nestingOptionsSeparator||",",this.maxReplaces=r.maxReplaces?r.maxReplaces:1e3,this.alwaysFormat=r.alwaysFormat!==void 0?r.alwaysFormat:!1,this.resetRegExp()}},{key:"reset",value:function(){this.options&&this.init(this.options)}},{key:"resetRegExp",value:function(){var 
n="".concat(this.prefix,"(.+?)").concat(this.suffix);this.regexp=new RegExp(n,"g");var r="".concat(this.prefix).concat(this.unescapePrefix,"(.+?)").concat(this.unescapeSuffix).concat(this.suffix);this.regexpUnescape=new RegExp(r,"g");var i="".concat(this.nestingPrefix,"(.+?)").concat(this.nestingSuffix);this.nestingRegexp=new RegExp(i,"g")}},{key:"interpolate",value:function(n,r,i,o){var s=this,l,a,c,p=this.options&&this.options.interpolation&&this.options.interpolation.defaultVariables||{};function d(O){return O.replace(/\$/g,"$$$$")}var h=function(f){if(f.indexOf(s.formatSeparator)<0){var u=Sa(r,p,f);return s.alwaysFormat?s.format(u,void 0,i,nt(nt(nt({},o),r),{},{interpolationkey:f})):u}var g=f.split(s.formatSeparator),w=g.shift().trim(),x=g.join(s.formatSeparator).trim();return s.format(Sa(r,p,w),x,i,nt(nt(nt({},o),r),{},{interpolationkey:w}))};this.resetRegExp();var v=o&&o.missingInterpolationHandler||this.options.missingInterpolationHandler,y=o&&o.interpolation&&o.interpolation.skipOnVariables!==void 0?o.interpolation.skipOnVariables:this.options.interpolation.skipOnVariables,k=[{regex:this.regexpUnescape,safeValue:function(f){return d(f)}},{regex:this.regexp,safeValue:function(f){return s.escapeValue?d(s.escape(f)):d(f)}}];return k.forEach(function(O){for(c=0;l=O.regex.exec(n);){var f=l[1].trim();if(a=h(f),a===void 0)if(typeof v=="function"){var u=v(n,l,o);a=typeof u=="string"?u:""}else if(o&&Object.prototype.hasOwnProperty.call(o,f))a="";else if(y){a=l[0];continue}else s.logger.warn("missed to pass in variable ".concat(f," for interpolating ").concat(n)),a="";else typeof a!="string"&&!s.useRawValueToEscape&&(a=wa(a));var g=O.safeValue(a);if(n=n.replace(l[0],g),y?(O.regex.lastIndex+=a.length,O.regex.lastIndex-=l[0].length):O.regex.lastIndex=0,c++,c>=s.maxReplaces)break}}),n}},{key:"nest",value:function(n,r){var i=this,o=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{},s,l,a;function c(v,y){var k=this.nestingOptionsSeparator;if(v.indexOf(k)<0)return 
v;var O=v.split(new RegExp("".concat(k,"[ ]*{"))),f="{".concat(O[1]);v=O[0],f=this.interpolate(f,a);var u=f.match(/'/g),g=f.match(/"/g);(u&&u.length%2===0&&!g||g.length%2!==0)&&(f=f.replace(/'/g,'"'));try{a=JSON.parse(f),y&&(a=nt(nt({},y),a))}catch(w){return this.logger.warn("failed parsing options string in nesting for key ".concat(v),w),"".concat(v).concat(k).concat(f)}return delete a.defaultValue,v}for(;s=this.nestingRegexp.exec(n);){var p=[];a=nt({},o),a=a.replace&&typeof a.replace!="string"?a.replace:a,a.applyPostProcessor=!1,delete a.defaultValue;var d=!1;if(s[0].indexOf(this.formatSeparator)!==-1&&!/{.*}/.test(s[1])){var h=s[1].split(this.formatSeparator).map(function(v){return v.trim()});s[1]=h.shift(),p=h,d=!0}if(l=r(c.call(this,s[1].trim(),a),a),l&&s[0]===n&&typeof l!="string")return l;typeof l!="string"&&(l=wa(l)),l||(this.logger.warn("missed to resolve ".concat(s[1]," for nesting ").concat(n)),l=""),d&&(l=p.reduce(function(v,y){return i.format(v,y,o.lng,nt(nt({},o),{},{interpolationkey:s[1].trim()}))},l.trim())),n=n.replace(s[0],l),this.regexp.lastIndex=0}return n}}]),e}();function ja(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(i){return Object.getOwnPropertyDescriptor(e,i).enumerable})),n.push.apply(n,r)}return n}function Pt(e){for(var t=1;t-1){var r=e.split("(");t=r[0].toLowerCase().trim();var i=r[1].substring(0,r[1].length-1);if(t==="currency"&&i.indexOf(":")<0)n.currency||(n.currency=i.trim());else if(t==="relativetime"&&i.indexOf(":")<0)n.range||(n.range=i.trim());else{var o=i.split(";");o.forEach(function(s){if(s){var l=s.split(":"),a=dp(l),c=a[0],p=a.slice(1),d=p.join(":").trim().replace(/^'+|'+$/g,"");n[c.trim()]||(n[c.trim()]=d),d==="false"&&(n[c.trim()]=!1),d==="true"&&(n[c.trim()]=!0),isNaN(d)||(n[c.trim()]=parseInt(d,10))}})}}return{formatName:t,formatOptions:n}}function Pn(e){var t={};return function(r,i,o){var s=i+JSON.stringify(o),l=t[s];return 
l||(l=e(i,o),t[s]=l),l(r)}}var Tp=function(){function e(){var t=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};ct(this,e),this.logger=vt.create("formatter"),this.options=t,this.formats={number:Pn(function(n,r){var i=new Intl.NumberFormat(n,Pt({},r));return function(o){return i.format(o)}}),currency:Pn(function(n,r){var i=new Intl.NumberFormat(n,Pt(Pt({},r),{},{style:"currency"}));return function(o){return i.format(o)}}),datetime:Pn(function(n,r){var i=new Intl.DateTimeFormat(n,Pt({},r));return function(o){return i.format(o)}}),relativetime:Pn(function(n,r){var i=new Intl.RelativeTimeFormat(n,Pt({},r));return function(o){return i.format(o,r.range||"day")}}),list:Pn(function(n,r){var i=new Intl.ListFormat(n,Pt({},r));return function(o){return i.format(o)}})},this.init(t)}return ft(e,[{key:"init",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{interpolation:{}},i=r.interpolation;this.formatSeparator=i.formatSeparator?i.formatSeparator:i.formatSeparator||","}},{key:"add",value:function(n,r){this.formats[n.toLowerCase().trim()]=r}},{key:"addCached",value:function(n,r){this.formats[n.toLowerCase().trim()]=Pn(r)}},{key:"format",value:function(n,r,i){var o=this,s=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{},l=r.split(this.formatSeparator),a=l.reduce(function(c,p){var d=Rp(p),h=d.formatName,v=d.formatOptions;if(o.formats[h]){var y=c;try{var k=s&&s.formatParams&&s.formatParams[s.interpolationkey]||{},O=k.locale||k.lng||s.locale||s.lng||i;y=o.formats[h](c,O,Pt(Pt(Pt({},v),s),k))}catch(f){o.logger.warn(f)}return y}else o.logger.warn("there was no format function for ".concat(h));return c},n);return a}}]),e}();function Ia(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(i){return Object.getOwnPropertyDescriptor(e,i).enumerable})),n.push.apply(n,r)}return n}function La(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof 
Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}function Up(e,t){e.pending[t]!==void 0&&(delete e.pending[t],e.pendingCount--)}var Fp=function(e){ao(n,e);var t=Ap(n);function n(r,i,o){var s,l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{};return ct(this,n),s=t.call(this),uo&&Xt.call(Mt(s)),s.backend=r,s.store=i,s.services=o,s.languageUtils=o.languageUtils,s.options=l,s.logger=vt.create("backendConnector"),s.waitingReads=[],s.maxParallelReads=l.maxParallelReads||10,s.readingCalls=0,s.maxRetries=l.maxRetries>=0?l.maxRetries:5,s.retryTimeout=l.retryTimeout>=1?l.retryTimeout:350,s.state={},s.queue=[],s.backend&&s.backend.init&&s.backend.init(o,l.backend,l),s}return ft(n,[{key:"queueLoad",value:function(i,o,s,l){var a=this,c={},p={},d={},h={};return i.forEach(function(v){var y=!0;o.forEach(function(k){var O="".concat(v,"|").concat(k);!s.reload&&a.store.hasResourceBundle(v,k)?a.state[O]=2:a.state[O]<0||(a.state[O]===1?p[O]===void 0&&(p[O]=!0):(a.state[O]=1,y=!1,p[O]===void 0&&(p[O]=!0),c[O]===void 0&&(c[O]=!0),h[k]===void 0&&(h[k]=!0)))}),y||(d[v]=!0)}),(Object.keys(c).length||Object.keys(p).length)&&this.queue.push({pending:p,pendingCount:Object.keys(p).length,loaded:{},errors:[],callback:l}),{toLoad:Object.keys(c),pending:Object.keys(p),toLoadLanguages:Object.keys(d),toLoadNamespaces:Object.keys(h)}}},{key:"loaded",value:function(i,o,s){var l=i.split("|"),a=l[0],c=l[1];o&&this.emit("failedLoading",a,c,o),s&&this.store.addResourceBundle(a,c,s),this.state[i]=o?-1:2;var p={};this.queue.forEach(function(d){mp(d.loaded,[a],c),Up(d,i),o&&d.errors.push(o),d.pendingCount===0&&!d.done&&(Object.keys(d.loaded).forEach(function(h){p[h]||(p[h]={});var v=d.loaded[h];v.length&&v.forEach(function(y){p[h][y]===void 
0&&(p[h][y]=!0)})}),d.done=!0,d.errors.length?d.callback(d.errors):d.callback())}),this.emit("loaded",p),this.queue=this.queue.filter(function(d){return!d.done})}},{key:"read",value:function(i,o,s){var l=this,a=arguments.length>3&&arguments[3]!==void 0?arguments[3]:0,c=arguments.length>4&&arguments[4]!==void 0?arguments[4]:this.retryTimeout,p=arguments.length>5?arguments[5]:void 0;if(!i.length)return p(null,{});if(this.readingCalls>=this.maxParallelReads){this.waitingReads.push({lng:i,ns:o,fcName:s,tried:a,wait:c,callback:p});return}this.readingCalls++;var d=function(k,O){if(l.readingCalls--,l.waitingReads.length>0){var f=l.waitingReads.shift();l.read(f.lng,f.ns,f.fcName,f.tried,f.wait,f.callback)}if(k&&O&&a2&&arguments[2]!==void 0?arguments[2]:{},a=arguments.length>3?arguments[3]:void 0;if(!this.backend)return this.logger.warn("No backend was added via i18next.use. Will not load resources."),a&&a();typeof i=="string"&&(i=this.languageUtils.toResolveHierarchy(i)),typeof o=="string"&&(o=[o]);var c=this.queueLoad(i,o,l,a);if(!c.toLoad.length)return c.pending.length||a(),null;c.toLoad.forEach(function(p){s.loadOne(p)})}},{key:"load",value:function(i,o,s){this.prepareLoading(i,o,{},s)}},{key:"reload",value:function(i,o,s){this.prepareLoading(i,o,{reload:!0},s)}},{key:"loadOne",value:function(i){var o=this,s=arguments.length>1&&arguments[1]!==void 0?arguments[1]:"",l=i.split("|"),a=l[0],c=l[1];this.read(a,c,"read",void 0,void 0,function(p,d){p&&o.logger.warn("".concat(s,"loading namespace ").concat(c," for language ").concat(a," failed"),p),!p&&d&&o.logger.log("".concat(s,"loaded namespace ").concat(c," for language ").concat(a),d),o.loaded(i,p,d)})}},{key:"saveMissing",value:function(i,o,s,l,a){var c=arguments.length>5&&arguments[5]!==void 0?arguments[5]:{},p=arguments.length>6&&arguments[6]!==void 0?arguments[6]:function(){};if(this.services.utils&&this.services.utils.hasLoadedNamespace&&!this.services.utils.hasLoadedNamespace(o)){this.logger.warn('did not save key 
"'.concat(s,'" as the namespace "').concat(o,'" was not yet loaded'),"This means something IS WRONG in your setup. You access the t function before i18next.init / i18next.loadNamespace / i18next.changeLanguage was done. Wait for the callback or Promise to resolve before accessing it!!!");return}if(!(s==null||s==="")){if(this.backend&&this.backend.create){var d=La(La({},c),{},{isUpdate:a}),h=this.backend.create.bind(this.backend);if(h.length<6)try{var v;h.length===5?v=h(i,o,s,l,d):v=h(i,o,s,l),v&&typeof v.then=="function"?v.then(function(y){return p(null,y)}).catch(p):p(null,v)}catch(y){p(y)}else h(i,o,s,l,p,d)}!i||!i[0]||this.store.addResource(i[0],o,s,l)}}}]),n}(Xt);function Ra(){return{debug:!1,initImmediate:!0,ns:["translation"],defaultNS:["translation"],fallbackLng:["dev"],fallbackNS:!1,supportedLngs:!1,nonExplicitSupportedLngs:!1,load:"all",preload:!1,simplifyPluralSuffix:!0,keySeparator:".",nsSeparator:":",pluralSeparator:"_",contextSeparator:"_",partialBundledLanguages:!1,saveMissing:!1,updateMissing:!1,saveMissingTo:"fallback",saveMissingPlurals:!0,missingKeyHandler:!1,missingInterpolationHandler:!1,postProcess:!1,postProcessPassResolved:!1,returnNull:!0,returnEmptyString:!0,returnObjects:!1,joinArrays:!1,returnedObjectHandler:!1,parseMissingKeyHandler:!1,appendNamespaceToMissingKey:!1,appendNamespaceToCIMode:!1,overloadTranslationOptionHandler:function(t){var n={};if(ue(t[1])==="object"&&(n=t[1]),typeof t[1]=="string"&&(n.defaultValue=t[1]),typeof t[2]=="string"&&(n.tDescription=t[2]),ue(t[2])==="object"||ue(t[3])==="object"){var r=t[3]||t[2];Object.keys(r).forEach(function(i){n[i]=r[i]})}return n},interpolation:{escapeValue:!0,format:function(t,n,r,i){return t},prefix:"{{",suffix:"}}",formatSeparator:",",unescapePrefix:"-",nestingPrefix:"$t(",nestingSuffix:")",nestingOptionsSeparator:",",maxReplaces:1e3,skipOnVariables:!0}}}function Ta(e){return typeof e.ns=="string"&&(e.ns=[e.ns]),typeof e.fallbackLng=="string"&&(e.fallbackLng=[e.fallbackLng]),typeof 
e.fallbackNS=="string"&&(e.fallbackNS=[e.fallbackNS]),e.supportedLngs&&e.supportedLngs.indexOf("cimode")<0&&(e.supportedLngs=e.supportedLngs.concat(["cimode"])),e}function Aa(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(i){return Object.getOwnPropertyDescriptor(e,i).enumerable})),n.push.apply(n,r)}return n}function pt(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}function ri(){}function bp(e){var t=Object.getOwnPropertyNames(Object.getPrototypeOf(e));t.forEach(function(n){typeof e[n]=="function"&&(e[n]=e[n].bind(e))})}var Di=function(e){ao(n,e);var t=zp(n);function n(){var r,i=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},o=arguments.length>1?arguments[1]:void 0;if(ct(this,n),r=t.call(this),uo&&Xt.call(Mt(r)),r.options=Ta(i),r.services={},r.logger=vt,r.modules={external:[]},bp(Mt(r)),o&&!r.isInitialized&&!i.isClone){if(!r.options.initImmediate)return r.init(i,o),Gr(r,Mt(r));setTimeout(function(){r.init(i,o)},0)}return r}return ft(n,[{key:"init",value:function(){var i=this,o=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},s=arguments.length>1?arguments[1]:void 0;typeof o=="function"&&(s=o,o={}),!o.defaultNS&&o.defaultNS!==!1&&o.ns&&(typeof o.ns=="string"?o.defaultNS=o.ns:o.ns.indexOf("translation")<0&&(o.defaultNS=o.ns[0]));var l=Ra();this.options=pt(pt(pt({},l),this.options),Ta(o)),this.options.compatibilityAPI!=="v1"&&(this.options.interpolation=pt(pt({},l.interpolation),this.options.interpolation)),o.keySeparator!==void 0&&(this.options.userDefinedKeySeparator=o.keySeparator),o.nsSeparator!==void 0&&(this.options.userDefinedNsSeparator=o.nsSeparator);function a(f){return f?typeof f=="function"?new 
f:f:null}if(!this.options.isClone){this.modules.logger?vt.init(a(this.modules.logger),this.options):vt.init(null,this.options);var c;this.modules.formatter?c=this.modules.formatter:typeof Intl<"u"&&(c=Tp);var p=new _a(this.options);this.store=new Op(this.options.resources,this.options);var d=this.services;d.logger=vt,d.resourceStore=this.store,d.languageUtils=p,d.pluralResolver=new Ip(p,{prepend:this.options.pluralSeparator,compatibilityJSON:this.options.compatibilityJSON,simplifyPluralSuffix:this.options.simplifyPluralSuffix}),c&&(!this.options.interpolation.format||this.options.interpolation.format===l.interpolation.format)&&(d.formatter=a(c),d.formatter.init(d,this.options),this.options.interpolation.format=d.formatter.format.bind(d.formatter)),d.interpolator=new Lp(this.options),d.utils={hasLoadedNamespace:this.hasLoadedNamespace.bind(this)},d.backendConnector=new Fp(a(this.modules.backend),d.resourceStore,d,this.options),d.backendConnector.on("*",function(f){for(var u=arguments.length,g=new Array(u>1?u-1:0),w=1;w1?u-1:0),w=1;w0&&h[0]!=="dev"&&(this.options.lng=h[0])}!this.services.languageDetector&&!this.options.lng&&this.logger.warn("init: no languageDetector is used and no lng is defined");var v=["getResource","hasResourceBundle","getResourceBundle","getDataByLanguage"];v.forEach(function(f){i[f]=function(){var u;return(u=i.store)[f].apply(u,arguments)}});var y=["addResource","addResources","addResourceBundle","removeResourceBundle"];y.forEach(function(f){i[f]=function(){var u;return(u=i.store)[f].apply(u,arguments),i}});var k=or(),O=function(){var u=function(w,x){i.isInitialized&&!i.initializedStoreOnce&&i.logger.warn("init: i18next is already initialized. 
You should call init just once!"),i.isInitialized=!0,i.options.isClone||i.logger.log("initialized",i.options),i.emit("initialized",i.options),k.resolve(x),s(w,x)};if(i.languages&&i.options.compatibilityAPI!=="v1"&&!i.isInitialized)return u(null,i.t.bind(i));i.changeLanguage(i.options.lng,u)};return this.options.resources||!this.options.initImmediate?O():setTimeout(O,0),k}},{key:"loadResources",value:function(i){var o=this,s=arguments.length>1&&arguments[1]!==void 0?arguments[1]:ri,l=s,a=typeof i=="string"?i:this.language;if(typeof i=="function"&&(l=i),!this.options.resources||this.options.partialBundledLanguages){if(a&&a.toLowerCase()==="cimode")return l();var c=[],p=function(v){if(v){var y=o.services.languageUtils.toResolveHierarchy(v);y.forEach(function(k){c.indexOf(k)<0&&c.push(k)})}};if(a)p(a);else{var d=this.services.languageUtils.getFallbackCodes(this.options.fallbackLng);d.forEach(function(h){return p(h)})}this.options.preload&&this.options.preload.forEach(function(h){return p(h)}),this.services.backendConnector.load(c,this.options.ns,function(h){!h&&!o.resolvedLanguage&&o.language&&o.setResolvedLanguage(o.language),l(h)})}else l(null)}},{key:"reloadResources",value:function(i,o,s){var l=or();return i||(i=this.languages),o||(o=this.options.ns),s||(s=ri),this.services.backendConnector.reload(i,o,function(a){l.resolve(),s(a)}),l}},{key:"use",value:function(i){if(!i)throw new Error("You are passing an undefined module! Please check the object you are passing to i18next.use()");if(!i.type)throw new Error("You are passing a wrong module! 
Please check the object you are passing to i18next.use()");return i.type==="backend"&&(this.modules.backend=i),(i.type==="logger"||i.log&&i.warn&&i.error)&&(this.modules.logger=i),i.type==="languageDetector"&&(this.modules.languageDetector=i),i.type==="i18nFormat"&&(this.modules.i18nFormat=i),i.type==="postProcessor"&&mc.addPostProcessor(i),i.type==="formatter"&&(this.modules.formatter=i),i.type==="3rdParty"&&this.modules.external.push(i),this}},{key:"setResolvedLanguage",value:function(i){if(!(!i||!this.languages)&&!(["cimode","dev"].indexOf(i)>-1))for(var o=0;o-1)&&this.store.hasLanguageSomeTranslations(s)){this.resolvedLanguage=s;break}}}},{key:"changeLanguage",value:function(i,o){var s=this;this.isLanguageChangingTo=i;var l=or();this.emit("languageChanging",i);var a=function(h){s.language=h,s.languages=s.services.languageUtils.toResolveHierarchy(h),s.resolvedLanguage=void 0,s.setResolvedLanguage(h)},c=function(h,v){v?(a(v),s.translator.changeLanguage(v),s.isLanguageChangingTo=void 0,s.emit("languageChanged",v),s.logger.log("languageChanged",v)):s.isLanguageChangingTo=void 0,l.resolve(function(){return s.t.apply(s,arguments)}),o&&o(h,function(){return s.t.apply(s,arguments)})},p=function(h){!i&&!h&&s.services.languageDetector&&(h=[]);var v=typeof h=="string"?h:s.services.languageUtils.getBestMatchFromCodes(h);v&&(s.language||a(v),s.translator.language||s.translator.changeLanguage(v),s.services.languageDetector&&s.services.languageDetector.cacheUserLanguage&&s.services.languageDetector.cacheUserLanguage(v)),s.loadResources(v,function(y){c(y,v)})};return!i&&this.services.languageDetector&&!this.services.languageDetector.async?p(this.services.languageDetector.detect()):!i&&this.services.languageDetector&&this.services.languageDetector.async?this.services.languageDetector.detect.length===0?this.services.languageDetector.detect().then(p):this.services.languageDetector.detect(p):p(i),l}},{key:"getFixedT",value:function(i,o,s){var l=this,a=function c(p,d){var 
h;if(ue(d)!=="object"){for(var v=arguments.length,y=new Array(v>2?v-2:0),k=2;k1&&arguments[1]!==void 0?arguments[1]:{};if(!this.isInitialized)return this.logger.warn("hasLoadedNamespace: i18next was not initialized",this.languages),!1;if(!this.languages||!this.languages.length)return this.logger.warn("hasLoadedNamespace: i18n.languages were undefined or empty",this.languages),!1;var l=this.resolvedLanguage||this.languages[0],a=this.options?this.options.fallbackLng:!1,c=this.languages[this.languages.length-1];if(l.toLowerCase()==="cimode")return!0;var p=function(v,y){var k=o.services.backendConnector.state["".concat(v,"|").concat(y)];return k===-1||k===2};if(s.precheck){var d=s.precheck(this,p);if(d!==void 0)return d}return!!(this.hasResourceBundle(l,i)||!this.services.backendConnector.backend||this.options.resources&&!this.options.partialBundledLanguages||p(l,i)&&(!a||p(c,i)))}},{key:"loadNamespaces",value:function(i,o){var s=this,l=or();return this.options.ns?(typeof i=="string"&&(i=[i]),i.forEach(function(a){s.options.ns.indexOf(a)<0&&s.options.ns.push(a)}),this.loadResources(function(a){l.resolve(),o&&o(a)}),l):(o&&o(),Promise.resolve())}},{key:"loadLanguages",value:function(i,o){var s=or();typeof i=="string"&&(i=[i]);var l=this.options.preload||[],a=i.filter(function(c){return l.indexOf(c)<0});return a.length?(this.options.preload=l.concat(a),this.loadResources(function(c){s.resolve(),o&&o(c)}),s):(o&&o(),Promise.resolve())}},{key:"dir",value:function(i){if(i||(i=this.resolvedLanguage||(this.languages&&this.languages.length>0?this.languages[0]:this.language)),!i)return"rtl";var 
o=["ar","shu","sqr","ssh","xaa","yhd","yud","aao","abh","abv","acm","acq","acw","acx","acy","adf","ads","aeb","aec","afb","ajp","apc","apd","arb","arq","ars","ary","arz","auz","avl","ayh","ayl","ayn","ayp","bbz","pga","he","iw","ps","pbt","pbu","pst","prp","prd","ug","ur","ydd","yds","yih","ji","yi","hbo","men","xmn","fa","jpr","peo","pes","prs","dv","sam","ckb"],s=this.services&&this.services.languageUtils||new _a(Ra());return o.indexOf(s.getLanguagePartFromCode(i))>-1||i.toLowerCase().indexOf("-arab")>1?"rtl":"ltr"}},{key:"cloneInstance",value:function(){var i=this,o=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},s=arguments.length>1&&arguments[1]!==void 0?arguments[1]:ri,l=pt(pt(pt({},this.options),o),{isClone:!0}),a=new n(l);(o.debug!==void 0||o.prefix!==void 0)&&(a.logger=a.logger.clone(o));var c=["store","services","language"];return c.forEach(function(p){a[p]=i[p]}),a.services=pt({},this.services),a.services.utils={hasLoadedNamespace:a.hasLoadedNamespace.bind(a)},a.translator=new Na(a.services,a.options),a.translator.on("*",function(p){for(var d=arguments.length,h=new Array(d>1?d-1:0),v=1;v0&&arguments[0]!==void 0?arguments[0]:{},t=arguments.length>1?arguments[1]:void 0;return new Di(e,t)});var le=Di.createInstance();le.createInstance=Di.createInstance;le.createInstance;le.dir;le.init;le.loadResources;le.reloadResources;le.use;le.changeLanguage;le.getFixedT;le.t;le.exists;le.setDefaultNamespace;le.hasLoadedNamespace;le.loadNamespaces;le.loadLanguages;function $p(e,t){if(e==null)return{};var n={},r=Object.keys(e),i,o;for(o=0;o=0)&&(n[i]=e[i]);return n}function dl(e,t){if(e==null)return{};var n=$p(e,t),r,i;if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(i=0;i=0)&&Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var Mp={area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0};const Bp=qu(Mp);var 
Kp=/\s([^'"/\s><]+?)[\s/>]|([^\s=]+)=\s?(".*?"|'.*?')/g;function Da(e){var t={type:"tag",name:"",voidElement:!1,attrs:{},children:[]},n=e.match(/<\/?([^\s]+?)[/\s>]/);if(n&&(t.name=n[1],(Bp[n[1]]||e.charAt(e.length-2)==="/")&&(t.voidElement=!0),t.name.startsWith("!--"))){var r=e.indexOf("-->");return{type:"comment",comment:r!==-1?e.slice(4,r):""}}for(var i=new RegExp(Kp),o=null;(o=i.exec(e))!==null;)if(o[0].trim())if(o[1]){var s=o[1].trim(),l=[s,""];s.indexOf("=")>-1&&(l=s.split("=")),t.attrs[l[0]]=l[1],i.lastIndex--}else o[2]&&(t.attrs[o[2]]=o[3].trim().substring(1,o[3].length-1));return t}var Hp=/<[a-zA-Z0-9\-\!\/](?:"[^"]*"|'[^']*'|[^'">])*>/g,Gp=/^\s*$/,Wp=Object.create(null);function vc(e,t){switch(t.type){case"text":return e+t.content;case"tag":return e+="<"+t.name+(t.attrs?function(n){var r=[];for(var i in n)r.push(i+'="'+n[i]+'"');return r.length?" "+r.join(" "):""}(t.attrs):"")+(t.voidElement?"/>":">"),t.voidElement?e:e+t.children.reduce(vc,"")+"";case"comment":return e+""}}var Yp={parse:function(e,t){t||(t={}),t.components||(t.components=Wp);var n,r=[],i=[],o=-1,s=!1;if(e.indexOf("<")!==0){var l=e.indexOf("<");r.push({type:"text",content:l===-1?e:e.substring(0,l)})}return e.replace(Hp,function(a,c){if(s){if(a!=="")return;s=!1}var p,d=a.charAt(1)!=="/",h=a.startsWith("");return{type:"comment",comment:r!==-1?e.slice(4,r):""}}for(var i=new RegExp(Zp),o=null;(o=i.exec(e))!==null;)if(o[0].trim())if(o[1]){var s=o[1].trim(),l=[s,""];s.indexOf("=")>-1&&(l=s.split("=")),t.attrs[l[0]]=l[1],i.lastIndex--}else o[2]&&(t.attrs[o[2]]=o[3].trim().substring(1,o[3].length-1));return t}var qp=/<[a-zA-Z0-9\-\!\/](?:"[^"]*"|'[^']*'|[^'">])*>/g,eh=/^\s*$/,th=Object.create(null);function _c(e,t){switch(t.type){case"text":return e+t.content;case"tag":return e+="<"+t.name+(t.attrs?function(n){var r=[];for(var i in n)r.push(i+'="'+n[i]+'"');return r.length?" 
"+r.join(" "):""}(t.attrs):"")+(t.voidElement?"/>":">"),t.voidElement?e:e+t.children.reduce(_c,"")+"";case"comment":return e+""}}var nh={parse:function(e,t){t||(t={}),t.components||(t.components=th);var n,r=[],i=[],o=-1,s=!1;if(e.indexOf("<")!==0){var l=e.indexOf("<");r.push({type:"text",content:l===-1?e:e.substring(0,l)})}return e.replace(qp,function(a,u){if(s){if(a!=="")return;s=!1}var p,d=a.charAt(1)!=="/",h=a.startsWith("MN3*{r~H@TFWP>U)sX_Ievt;Ke$qWUMw3W^|82H>&gp#4-7Z^Q+6$_s zzsFq8)SFJ%X2As0W zj*;;t19MVbze91S*aHwaGX`6v(cqoQy6RUr8JSW}>PQMAyV27QL7TXd9~}&XmbxGr(wI_$ zJ-KNKhgD?vJ!e4X3ZICBoSHevNp9ZpZ{=kYrX0JvtWtoeLnzW8e2dd^=bW86co2;g zFpY0eKQ%rlLuL(Mf`iPNHP{kKCMNTj0Ob51UPW69y&};XudTHMrcGN{Q0iV8l_?ym zPf?nH1$a^MdHw%x=PU$N$($)>A#F3{Pqb+lo88Yo1XIX8=nuXZ9=tK;Yi*;75BkU{-u_1)ou*g};nZW38g_r+D-;J^sM__yhCf|AE=*b;;n_0GaEt3UIXFtLls!Kx{j43;MV`X1#>mEXM}brI*SQv zycKhXdP{PAMgO=K{vKLpwBeAF#`pqRzP_d(l-!(&y_?@SbHU;oyDbQ0Ng^ak-LG3h8V(I{VUPtu>)8zN@ko&~f> z^h~|aN5e;+xiGT&O|*vT=SE(*hs!sQ6` zjd_`Vy`waxgS~=z?Se1&IAjO^)QKb@^zWb$kxb3>`&p0T&Nb_h4NOM*%^g5yBz z2+g30B8X{dugEgpTDVpOt2mSfX-6y%N#^*1m{7|{F1B??QT0^)l{&&Dg+-e|O%p9A zvvVgokw-DSCBms(gRZ0MO0Nhv7vLgrqZyX=qDH&G`3V&L`9gNlrKw)1a zuY;LvC^6+Nd{)5PV`4~arml!>Us#9i@X(r(q|5Ig7~&N7=$A%e8=~H*1zDY~n9T1_ z@WR~wWnmUo=)9&QPJo@&y+qBbSoT~2Nk6YD$sA;y`M;VTT;LG=l+dCmkF1z>8-vr zSN5#us|z-fb6+z0FX90$cm!!oL@jULp`i{*I#pdkJhvtIC3Ei_Q!n$N%u~M_eAxp- zTJ_p~z!IT(*S{1Q>R|rWW`8Qi5P-g(olig3G~kaF?X576jW$rCYxYP_`YkgJz)}Ipes0QsM7ePY(*Dxpz;66d&{6Wk4&aKqKybPp@btAHNe@gMO3xnDCo4Qr)=-p`syEodQWej=Ft&3%5;NzjeI~TZCK#1RfSFUnhMZc?!vCzP!_r{kH-##WG4Q zz~VW%bXVHF9z{g@JmSP^i-6IXyhAWSBj&@x9szA62PJJ9=ZFti!oK7ylY|e4sI|X6 zDm44rYgpv1Tto`p^+GY{LV}|)SONa=5bBT=d{o6$TPp;M_WZB3@rak80qigxa1kp*`n)3i`l5aS6PdbFdF#a0=junG^00cFCi*&7KN z`mGbDx^!ik*U4W+Q95MLP&Cy+>Lt7 z=N^mDqBJ)V0AR;h;xqGaiR3O;&6;OgFMTOzFXD5E{ut5Eg*u!@ z384k$K~>O9|5THfbHc5x(J!)Ai2BgQj1;GXRt01`B0B;snXrbF9N)?JIW= z5<}>yXzO_;&fpYz3?|V6bg{}|d0_1=)YJF{Fb(shCZU~CEx{KAv2J2BJav`EWH6w^ 
zQx>E+x2HPUoI0vxueOWfuRT*Kg%3=L^EIWuJ#j0gkUl`zql$^{kn_ab`t-A(4@^#2 z!gN$y&1={}lk_If1Pcu)A5k=NRDfZsrxL^+b~wM|LcIEs@9|%08RY`9DcC4QU}95eS-Uaj$QJK4@esdCU}+?+ zahm+qkm}h)EoJtD)7RaU?kN35y8DVlx3*>qy6=3j zbp%Hay>~)JXv?|{dy1VfrTl$i! zXA!|>RM|AiQKd(Y5gZ)Ic1Eh}Zw?|Q9hp2a)setfsB><^;@n*_SFl}NiO8alV-X!6 znGx&nVEh|dQ)4xQPiC_EE|AsDv5f>d8HvC`bTA$5p+ZD^dSbvILTb@*u*6GQQ)EQ< zdwQ;k>N0Op_JVUKt7+_O(ycUf1HZ2a)~WUm7P^713OdKo%)Xfth7;5+tf9&4D0#}| zju;783x3rh7<6Zg;z5c9lM%L7qt#D|=G>-+nFp^uWHwf*?N%)P9REGdf^-26Wuc4d z9Jp;Ji?%dm6Mn5kR*)vEyd#~?6C3M2;AC%o7Wz|mLurj25D_~;vN2*31p#Cv@R;0M1~L)h6l)x? z#BWjJhnIyfNIY~Pf%^UkI96@S(mD5vyF|Gh>+Pp0NmNj_Rduc%NoF#r{CP&QhA>P-21jcbFw_m8UJ&8n+m zbga}jl-(4pRJ<%Tw-P33#A%njt1dH;2)r9ik7txl8?@lKi-v-QAx=ebl+) zO2)?xCS(g`ObvW6xGVJRl=wLnXx`bn+{p8sw&F;Em$C5odUoZiajFNeyOD>#Kfxv*0}gB9r{FZnlp6&75apOwDN)~u`RFsm1bWC z0{$wh&|@N$R|P=}^o-;ktjPvzs9fF5ys6BLe9^#P1`@)gEOziY5gK$)y)zBlbQDE4V zuXbQgk7YI+KeH+8Tv1zjfQ~-_ybu`DVcQ@7aiNy1Svy^Y_hf#!!!({9*#RO?(=9B! zTWNJsebbr#qBrt}>su#qOFHr#0D{t%R9|Jp71vDy=J9Hj{T<<2JZKwW&TutCa@wUz zaLO+BW&sZ7bAT3pdZ29;`!3Pl9G8KiYXY@x(ScND{H|pN%BCq%kyoV+?1_k@Uu2kX zoXlN(91VE89=%fh+ajGl@yqk>Bt(?k3>*w^A1yrQR@HSFuiy`vZGh#S#+W;+2YTu` zEwbY-g=iEG@$b@?qSU+1yDaX=P?5sTAvy>tqxyn!$-;H)(+Vuqha`2TsJ4M?Al5R*{=#o&!xKhH zf@RB+I>J2`X$<;-?CsoJ<+D$Ty8sFO$!N&8F&bo_!^HFbuDJs(J*N|z@$gGf77xvl z8O5EENWUg;I8Txtz;#)z>`%WIKE^$&@GC~}q7z9kP~IWABqzgM=Ggh@;G!R6_YpgU z&k5 zSU5!4TA#CVClq;G?{W*iTTmC;i224tpy_KbRLL0UpbFN0CI1fBZCLTU*5ZCKMcn6K zT3~>MQn|Y9lky$pfF(9yk3f#Y&o(7)T?N?rGkM9$*BhnuH{I@ZX--N4G%bVY5}i^Z-mR_QR39BCe2$l5T1~C4 zK?h*N%oI%SDISDz9(m{UK+6T`OMH{(2qiH@6?U;+V``iY(pZwkwR9HfaIq$AGprkA zWU0>vj>rZiwj3-W+xF`>nxstV_6@f>O=wh*w`{0cT`LpQQf?PY5BB>RN|2;hT*R74 z#0Tr9OprsrP1Xib_6*0cZRN*l)7Wd+3}rLqRfy@S{P!y@v)g@iT#I;)JywK5NOq6C+R*D26LwF84F)dqUsF~}(=IV8o zvOQM$(bl6`#T#^C?$R>!NsIl^*P}^|W~`Ts6l&Xnz~Sy~eODKgdN~K0G0#1V-vZU2 z{1Id600&Z!M^Z){GpIgZt|myU8t4$RCM17wCAu&!{55*SHzu0jPH1l~{96iH@|xU& zV3)W}l_a)-lr<`HGJDHSb%a7);t5p&|HPDJtP_@~1O+|EQ6#LSLLk_8LFdvF_KDO` 
zumj?THntAVaP6ow?Mgwi1_8?(XR7l%2m#2zPpEIy_U2uN59&5?QX?O3!- zVif)^=7m+ZSYUM4b75jbGd|27F~Ob*B_Vv`mhB_Izb%D`Fy_#qS6?ogYxy6hu;e20rvm; zmZOz{)XlA$pdD0jW`|IfZ?wvB=>YKzpxAoavD#x9@!eI6z%2*5DPy&|CN&tfIAR(% z9;Dsa@ESqu@~Cj?1yG|GK!&z^^w`#~rMsz0RfAoR7A1A9kJE(0B}^#3pN!;^z@tW> z)%I2sgGY}38OvD0evgD6Lo5s0og+}+m=55*!axldoZR&VMONsexjGiey9s!58mQDw zn)DB--l9=gSrQ52hU2?=LQu1`C5UBMmQUPn+FED)|>lGs3g;&vCZ5L(!DGmJas+w(GC;l?mCK6B~;W>1T3 zGsWKGhGbdSl4K{713`MIX^-@XJyCi&-C@O-B7$RglYhYJVz7e$bkUZ0qXv1Y)B`w3{tG1w!)%2c zs}|vs*}zS#E-_F2fMF>pnwnN+*?)tXkVpk1_J&x*$daK!vOxiCW@cSwk?%nmp_agy z%1m*V2B}rgftiPDW+uqTavuq}(VU=7kP>ELNx>%3Z~)ch!(nl@fu7ehVI6qF{bNiwI6&)ASc@nOp6^S(}Sox}H=@6+-5u>W)M z`ugkT_3QQQ>}WX0_nl+gq5bu3lcJ&Edw+lUeOg`r%C{9GMhaI~Dy^0y49T5NWN3INAvGdUTg#1;Q$uT7k5Aa4| zIfkD?ISW6Tai>Q*U7;}sFMF@k7VQ&DlKt@-*}bm@9xos&UdXWTNE$7R@5|sELh-j_>132uXlBdlg3C~<-)IO=>a%E|7LQpw1**C z!Kg|`PL{#Mev)u;ju759Woi2BV5xV+69yRgQzQpRPnQ?yL}x>$?d6&frID>U*w~ggY7Ge}~)v)z5kxJ%sNk zi1H>j8S!g7;T>LRv776qRRN8 z8do|2kk^p6JS9mguA4)kVO?nXE7Fd3rr*S401E_#O8)d%3RvBcEdi0{1b4!Y=sB-0 zJNh?q;W(zQXmi>c)`F)LYl83Fp^{aDHlD)yUl|5f(84kosp1q3Sxls0ru976*2eK} zVE0IEfUn?(gSRoejkyo)P#p{3(8XAsb*MSSs42YLkcK1HWP|P#hF?IX>5qxY zCYL1_LwETaiV!al8_t491xP`Z%Wz2i6lowR%g>WGZ9w-ZRp7{9D#bn7NrL+K@|*NL zi@W8WLY0Qkq6q-!Y$T3#T%wCK?siUa#7*#GpASsPM~2p;0HfJ&j!SwcPX;8@p69c1 zAcHZ~i@ps$$C+FX$~eJoK>|NBY-x$C915H-cd`Y`$%Q!mC0Uz!8t}n~MuR+AZa)0(#PNPC0!W8nWsSv^DorV(vD z(A)6@Bly5xN+FStMk(yyc+ma`kt-x`I)*t9)_eu@Eje^6R4=_pfr)iZrLWG z)~ph-cl?b?OfZoNvhmX_Bk;;3t!j*Ol|o`1x7UkS=)n+od?IX=S^2#goLIFZvf}K* z>@*^>?&~lIr(Q5KBtlGhPs~wr$JIcDG`NR(HOCj)Pp3F^CtThj&39|NP=bB z06YPxW0Ak8h_k`sy#>?hwBzJ~KJ(+~%=hE%jcJ1KTnG!UZwkl){Hf1APA> zUvWQ@%i8Z0l2hk2ZBw@0O?=P$rY@2kmyr|V z{Ck{?7(*1fU69Kde~gA`ewu~Y-o9E`8a5ix4K>Ir9;@ow{zQg`?&z3hpP!jGDB&?E zin+2s8N5$$=yj4}+_T1n%6}HO?F3k~&dEgEp0tQV@nDGmFzzrtihay)*(oC;anCyZR{f)e`wc)>=LVmxd~Gw@_2OEqZF5AZuAeOO*uwR z-NnZixV$PJoy&f(4d2m$t}vlefq)Zt?wRzU>$+u?=6@jmKUE-}I-VYeXup1en*RF# zRDm%49~H=Y#5yBl$Tf@)M4lIc35}ff9<8pctdp`->~aL{FmbXescB--!zPcK985qQ 
z{QwrloHtANQS&i^*J2)qVda8PxFn`Asb{s-YK`>Ay*SzgV|~mc+qpA?)CV=KuZH&r z+aG&Z%K8qph*brI{LmyZoI%_V*7Ff~vD!6?GJ2^!&Pde6-+YMB zP6)yr(Fc2yKN^tE|1=<}L0>jB5MKFIz6f$m4Xf}#%a*?eq+I7ZJt@T^FveYK;Iv(G zz2qtPE$ns@dy=xpUcPInsYaul+8l`D3WTSZ@9rsMF@L67JZ=XQu`=(8#(;F>COkKa zkejPH1{7TbOwCu^b&6NU+6Mfnu_-(<6L(sr=KY5bm{NZqOEOmVAVGOl?g73ABU_(G z)q{GX_F$A5gRToUB_aw2JCYf{6q%XxsctEhXrYMqBjn%72B*4O#S666wd{t1$()g5 zwF@dbliX-ft9}dtv)+r{a)Nc5;ql?Hl@yrPa!d7d0EavLzk<66s(DHwems|Mxq#nm zJ7ak?+nWYh5va&mrxKvVXbg49#3{wRt+Ld*5!yQfS-`}0s1RdOh5AMTW(}vGh@74X z0q%eU!(=?1Rz=)_H{P(}4pKPD%#v?KoVJIki@slURh3o_oP{`#CBr&l6t@nX8K%m6 zi`ydrj^rBX+XauiqnjEQ68=uV|GP9~>Rj4xjK%vTX_^LH03!{4natOA)`r}z3GA2z zd4F`IN)%WT)ep^1bV`0*CtiSD2wxN&NIu2UKGjxm_8*PZ5@En+s9wZ!>#Ac*239JL6BEibZhA&s_nYmf;rrNs zf3)kWu|I2 z7&vQz4Tao!JIjE@dud#@6$2N&j?cWLEs?IrJlBp9^??6#bko$cIhLt&^`P^T$hb4O z@RW?C)2mnFI-YQLUBB4vU8B6&?Ea!DH0lvOp*SV#;R=kTvvmiBjaUt@(k}%z^NERl zXVubvU3e%2QgU8eEaV|Do=E2=92+SFFb?~Nz&PY4e&PS+U{9;=9Lc4zwUo-l;bUAK zkjLOfBYfe9prcfJ^asJvq$;fiaCg6tl=85A3bL=-Mu>?qFUB3GMjTNjTkCnTg$ zGzxI=W??~f^jqH?DiKpeW%q;yTPPYXAxc;&MmbZMY0e>Aby7LH`u9`^XT=yH4$9?% zbzTR_1Xo*Lm>s1cl<#|6`}Wmy`|=`&YO=C3N%!me>JduUEZ4XE0xvhYQb<8Sc!gqA zV&-)LEcqN8q!Cv>vC=}E1@9av4$*}BSrivyfj|xvy!yhL4Crl2_tsa7hn~+WUiJ)0 z#pHN@xyRZzzg>2hjsA1@M+(9X4W4?3o{xyzr_J+Q%JJrrVfWkXpKoXPhBr@6@9pl^ zR*#j<>&VUL%kgP>hIXTS=K^UPJ9fXVPv;i=0gmnFn%MSbyNs0g%g3B%hG+e58@=y( zexfeljgQ6`&zGl<$XjE123?rYlFjkQ{l}xXnu=J0FRNg>k#qj7O5a^Ib-%YZpUvtk z4u*>9ieZY>dz;X2ertq`lH*Gd-_`AIJ|DZB?HbqEivH!xkha1#drh-r2|~keus5*rN*4$E5rak^L>iixkHF ztzYEy8p+RM+rJ*gmJP*~Qmg+&@#i#i1+i5~B)rF;GdWin(BO?x0}`&?lw07sN~KYv z1|{6qz?mE&;9jzIqfc1`jxR?DCv}4yz_oNKCj``!#WL#Q8&d<~m7WFRcWCt24H|pd z({H;&usITzqeky5v9SJXiiT{QsiIel@&8rT4JlV^d$7i@QSP9Z($?C~WG-Tu4WLC8 zJb~gF+VEJ4fgP$O7PP%AwNXrJL};6v!ZZiY_;rq2jV4C4B*?4ibxC{O8L~46|J!WJ z$<=v>t4)URa5(>R;A!VjElDIUsgo(FYF zqsGdHCW^XBl#?dl9l2p02PLp9={aQD*3vs4XXwIxDJn`pM~;-90+oUZZC}fcn?#3Q zWZ$F*HtI0v`;)(-b^^EnbR*5}qM?O2X!M&myex=L` z6C(AlU-AuBRIX+|VZ!=;t|*#w 
zL0Vy(tXnn`>T{A0+^ldr^f!h)7V@AeL^FkL)Tu_0f}d&Rjk6s%N}Nk|`GE7=tv-Vp z0`~R-;67L}bH{=dv+PwF0g)I7=>Ehg6wG1oDLlie!B1Ew91@G+(Eh9Sv+Ebfd9I<4 zCcEioAM*Z@78zHDg9A*8geVkEu{5*49z)9EV*M|YO>bB#MOHj;GE$1d`3}h{_V9fY zQ-+wwBWnCXzRor*kET=c3JD^ZV#M{T<2xw2>oScaSVk@&dE_d_9faDAQDLF+9zs-> z60CXHPLHeQrtUr@EPnr0MIpAM&IZ+S`vb9~yTVeUc$G=!*vty8=Z#D!@)^!-N6{X`)xA63d& zNl>^HLa=1vr8GodG`W?rmMnG6gJ{IU|H~L|S&vKJG@$B);W=~F%7q}6TW71ZFi;Gz z#qd-}*%gFJEF~XTF_IQ)BqMU+xMrsy?6Z+GY+29jQR&zLFdlCdlQf^2?QbrsOiF&? z*g9?5uLZY}0X@R=?sGt~h_FfxBq7QRrLKn7H)RG?D!U7CkNuEqSOBZv#6i5XA~#cp zMTZ5_`G@qzFjAQH{q228jn(ndA_r)#(&)$1vo5-+<(9pW(}ekIcNSc|6cZZAP;OW; ze;G~0GzoX|oCVBq>I_YKfe|{2-AOqSW9J|r{vfo+9~cl7mjmv>i{O2ypQe`CmcO%zW^^nE#zR4JGVK=!uIu)T)lZ>Ius5`VdzArL)4;rEbwp z`gX~z!)GYrrE9F`UZS8ji;hKk%8_u;?N%JUPx2JawLqRQc!AHlHd%-UJD#YJddg6`m63m=G{0M3072Cw>;>8MQNxkhI-p zN?Z&)Z-W)Zgg~~CVPK$CuZM>6xM5P^NxX_Ek&uAY)LPqQ!rcXNBcI_g^3pELPv9Z1 zpn_8QQHY{C=O*b=n)ni@4p-tXv59T6i9T~im_FDE;Khp4dSO|`vR)w?k|DVSBk^)Y z4EVD7@(TAV09Fa}g)daYNe>H_Yu~^iD{6UFEsTxQvgTF1GApKGp2CO5UJ5eMZ#kHZ zQvZ`^vVlHx#GjzmSeA?M1t3^*3u~aWSdzY*z0v!_`2xuEt0PesDgvo>0K*s95W&=5 z*yuZ~Si#^L?N#i745UKT7-&IjLR8plJXfSr3DbW(mOYoH$&67y&@e)Zgs8au48_aU zJ+%(k6633mxp6`z>C!yu1{kSep$0Jqy_J7fm*zd%Z0TU>Z71a?SykB5CERL&vyKpL ze~N$su%tP~*ru7t3Hz{Q3^|8W2W|odfwT58(Hx@AP={ao?{{55vUfL1sM6!VBi1X` zGtICUk&Ax-N(DCzFO?W`ypG`dx?0Mw&ij4#iT+ zoC;(ZG7Uj6sMd%2YkQ1;^9GseIQb@!ItT;NOruZs5_&;mn4s>Ctm9W5%|fNB%g3$> zttH`wh7fFdR2+!m2z(}h(mhElLSY3MBa@N!R#s@63^sSn;1?jE%UoxK8)k9f8LBFU z^Mks77sbpV9-Bnn`-n8kpIJrw1_bU_R%g3*pw4ZFYgjJV&|SS6(wmG&Xo~Cpg+1>c z*&Lralgn@~$~o9{yq`Nb6&eVs$$tO?$|Z&KlJWZAPf7VBZ`d-QJ-{gDE}rWo#}hXX zPXPjQrS*pe4|Bxs+qLU+{)(H{VHwYTNEd~??Yu9q9tWZ?vzjJ$h4-G1YNMsE1)J$8 zIdC7eaE~{0XU;B!%(nw%`o*CSR=ArU@vaFdufC~measomeo#l>~E* z5&?M9rJdlw3|2-os*G%c$b=E14_5lGrRx36=vmv4<)C^gKqObBL%a>8bM2NOyJAD09Nk z?>HGZkqB*M@1rWF{qJ3%q4YRy5LCq7p9ZT07ynN6)sR0G#T&>-sY?z?v}wgLT{0HbqZr7 zJa0cOB#Sjdyi=*YG049PnrK;Ca|x=BzHl@ZlpedDPgRMb`%CU#iVKZC+A~-)LCkuP zlwW&)xg24ue`iH^@*MM8#f0(*A93uo5LlAhAH_O#mIkG6N=t}k2{J>naGK9A&GZDi 
z28+j}mxwB}vM04v7O0SUPLo*D+cJhhL$V~>M{$!}31BjVtWv4a3Vb7f?~NeW^lFsc zJdF@I>Lm@8Ij>AL7t8*yV3;mo?Lzj!USS zb+rmIUTh%-=i)|9!7_C?$b5w8|LN#_0`B&cgXaKh=HfP{HVKlmoL^}@{QJJr~7_qKLGYE3tFZ0u5TXl zGsqh0kY^I;?->OGDK1Xpd~6-y<2t1mQw1v#!QH{H>;eG0eXRlj68x4P)(1yB(LUan z$Ct&lSC(eFKY!h3=pQb)F*o{v!s^W&8cn7d9vBK|pRo1yJ2Z#r+gY);q&hkk+K z8hNTro3o^PH>xTeYAytH-=zez_*LD~%=)AE!W2Rp%XSg<>;A|c1m z12e@maWi5+3)*(_{-bBBu1;FKpl}GLq0+yxC&SVGpBJBTN29x5a2~%i%WOy0V|bE} zn&H(6u^t)%<~nWNnSI3-Yc~Hz_34q_@4uR4@-GvP2iiU6cC5Gilk6Fiii;i@d;I+~ znsZQk+0mWM_OBqo80(5L3YNo>*ccO+6C4V}91&LwV!%niEhIIkyM`(VEU9t_j@q!G z-Rcwf1Qu+3+nVeQe=*!HSSjE0-Z2+zWXg$g4YCv0tK_`xivwtJNn)DKUbMaO_CH?s zpF$4jijQopknKBJMhEbJmMH&EUbej<4F&<@*RM(U|8HKF`G34@;(E*vAs=#0^N1IB z-J5DJp)g{}0pVg-UOsF!oN54OY{HXKS>EL}3_q$TVp)*n4|MUJf%X3Uu}j*%6KqAI zqECc8NV)S{x$(pe?d{eFMNZLx_R?ytEL;5~3Fo2q<;Bof#nVn}%H|hfOQp2Ym6D0Q z@-QJ?0lG>bM|ER^7MZ`zB6Q$ z7^|WztwBw^t6!Twv9b-vsteb@uKVIpt-gpiz`ODio|8oCX=prxWVfKkYZ#k#+IYj- zgu3rl>!y;=OY9R9cx9I5MLE4DA{h11#+^lrN13O;a|bux4SzHdV`OzU;h+6~UXyjc7L-(kc&Ogsji< zn4FfCM)Vjl?4p_A20_j4>K4NvW>yp>)Tlhj`v$hq{uuH?Se_4G)Mg!8Ho<~>5{8+M z8MQX3PU?$0Hcrt`t*_pTY#@?3X6|pHL$<%;c=RTu`V>D^?N&ChLm?t^ygR$Y>a|Ia z%(Ooza{}pDJ?pnV%qt~;6WV-vqdCjC+u0B3|1#4l)KzpmXhP~76ZyJHp$r6tbhPTE znq0|nRJEo^qNebxu4BEE)2z_6GFyJ~lS=?OK;)8=O8|$A+k^`=p?OE*Ij2A*&A4{q zXw$BlzJ?DSPkA5znmj+AugN%Os9sn+`Y`-<4)OC^)UokZ_Vjx``+8whcEO zU*d6Aj%!z?YBfWPGOB3Ttq>GnLduhET%qR`C)EA1T=izZpPIfs-0R^TJVHvIZjSEx z34GW1ZM%N!#<+#vn|(WXzuvibd2JAE6J$^4eD5xGba~jvK6~CjJ#@T2K78Eo|M}sb zyt%VuQ`9ruQpQ8?D>5?P=Qmq9eK>i2xcRaoK5vVuX?z;KZy#)OUAJ;}zPAbJ-}m=-@xKnepS?eJe7`!lep}OD!=DuM zWdi|YtiZPO@O?fESNwq~1w;R}vfH>&AnKvL)fU`yBx^Thqq!oM(oZm0>7S=Mi)BWs zA78;la^k0B0eZ9QPGWzwi$A@2db;~EKYZ--jjO);Gr0<;=h}7%e>!Ea|Fi!gDDjZWVt+=}V z7U3Q8R((RvbG-&NyW4O7CKzmu7}OZl?X>oSUfVYXXnxz&CPIv(AUTyA8wE z@Y`qz8Ac{;e~rWD#{H8y1J?JoZE$B#l*_k_6>i$3QqRB2cXB+v1r94PZ zVtO&iuL`}BPzaysrIu%MU(TK#+pMaaifRl|8AkJm+EmgnIx3|wxt(}dZGZE zZUm|)A+A%7I_rl244&c+>z?v?^MB=Uweds)YBi(-xz9kT2s9LvQn*n&ybyaFM=h7z 
z;tf9OJ4pQ%xw3Z!6ji1fpiW}^PY=9+Mh_!tH#>I^QPmWeGdRi7;asrqNRrk+@SCNP zCuStfJs8dIj~#x;LAvNnZF9)*8dSa*s7^ALXH{)J-PK>J^u2_p9?dT!V@frP#0}6{ zUR4WZ2xlys>e_6&je(`0G=^lRI%uouz5dfi^W^%Ez?)ydlx@cL+Q@F!o=KYn)M*>g z$z_A<`Cgr>Iweb-CTK`Bx@c{zEbyMdcG|dRQC~Q+Scbb=9dy4?=z~N>b<}1Nr_g1C z2iYX)72UNdLDKm?MsFdPCI+?Bp>N$&wc;V=l0;Vx({~&!p*bv;DJzTYg!01|Mf^iQ zuK7Yy1ZNGM(>crqRKBH%kS1&OuK(UnyW6(4{#C(>A4sty2O#(>#+vm3nQcB8o+zWlB*JBUl z4Bnn_;eV>lZUo7?~*n{8Y|UzbxOp`!TYf})d)Hjxlf3_|w3yYF!|gAalq z1LkjbAjl0*h5~LnJm8S|?0EBZaQYdY9ujACWNYfr&QlZD#{s5vDTr_|tYUvIWWuSr zY%OE24p@|fIAge249>qrCGVJ#Yzbkr{*1_HX^!gSL0^`@$ZQJ!e&hU7SHcb)DksLe zF(SsY5)D&1jv@*-c{@q1*>b0kUI{(pFj=b05+n;C4w;9&5Lhj;c=k&A{U8n1BNh@d zvLt5~X`YtVInXpGK6p;ro31aSg{;Cj+i_l^atToa(k*xsM20z`SPYQDQUlWuT&&oL z;4o$qxXjj8m`oBxkzpk%qh3Ks4lFV_vSiL#W)fI@OUc}N81wkyP0H?Su7w~}{c+JG zUe5Xxadb2&MPQ$cLkk3jwNNJF&_}9byR098xv+!$^LFL&VU6JQoH2+C_c2whDB!me z(S~baj^U(+B{G5{#Iri^x==C1rQ25XJ-WwdITRbNH$sjnM6+`t7G>NA$p?^|(*+BV zC>Lz7?Cue>2z$u*L~htXjGJtoX@&tpk1CxUaA}21%K5&snj(oPwcs!%LdR&*hWkIp zR~|R!V+a>wlOfa@($>D(L&Kcx#VUv_$AgDy{g&HRfUuNRG`-YBGGpuI4*irf%r>5&L1K8>gr8t&$~9o-Q) z`W4RPz-g2^Qj-nE|7zg?5_~g{JF^1@f0b*`alo-QT1rm*3v(I~-< z{J3jxPXNp3l=cK!DI$uC;ftP9ae}0Qgcxj_pJ1HS#=Y~*<|PPlO*5r;aH2<0sg)^K zFH!qj+@XiWm{L&`o7zDEnW!cL4nrzfi{j zlC5wq3>u=OGbs%^%GJz`#B0Cb>@hHu*cE zn!o%LtbQJ~RBe{jo`@zyJJRmkeD+ z;z`BNLmPm=t~VbPrZ>0(v8N03tZh-vPRE>@QD3k-@W+l*jb-GdF}xexoWi#zK4iEv zL>72(@auDcyCPNH?-c3o#Hkm()i?D$CE8Pp6?K%EFvTk5hu|vrKaqZt+ajtkLg=Ez za3(T@_1Y*B@-t}(%}NHyfe#UFn~sIlkBay<2_Bv|*FI`9Vg?6_FEzG-Ifp;ET6xM5 zQQ-U$IAr<0F^_05-_7m@173urK#Kty3*Ycj^)2+zMk3rc3tCu5(LG@kdb zOXm=mb4=NzXL}1!P#=PBL;_(f&}M2u4m(;3#DpfZL=yfHC*RTTZ^1rBqkR;3JORav zBDjKS?&C|G;OZ!IDNaYI_sGvQ@?7aGTrBK9GDmK@U8K55lu@8Pz|*OdzuC61a-)PK zOcq8iNO4VnL9LaCmVGrzg~`3^z_M#p!fnhj{wwfHYN1)2KDZZ{%3TGalRB`xJ+a!R zJCrfdfVI)gh$Nc4`~npopRVJM5D6zXFA14+u4GL2{bn%X{t!Y+ zGckCRJl(~0^Xv)vmlVicfh2KFBF!zKlke>A{Cpf~D9HCpPP!&!IIFCNaL9I4S@Q-= zloFCPRcP@unT_zRG{JOBJ%ea5&U3mF0w>*~y(yso69(%ch+ou;2FfXiy%}4Csxysz 
ziGDHKn0LzP0Yu0(!SZ&#jQ9|)vT^bB-IA~b=X9Yn8a&RC5GR_HXrUIlzvZ%j7HgQK z)4u7)LiS>Hys)wr(o+ou*I^0J@Sj0m51#r%cZn3L=!7Cpn|ow8)vYDf;8&g?sd?~? z&@L{_KgAz$S&!4T=yM)a(N1paS(j!n5(&|tTYgC^gcAosm>PG$o=e+!$Xa=Wsc{x* zP%Kf}G*W6Hl7qCpGXbZSHWK;Dhav9@iK(uwFQjqr`gsE+O-@MNK#KAEG1>;zjz-AK z;%s=GCyrt&*ZqQ=aHHCJSFvB(_1pbvVt$$hG^=68oKv?NJCgbN3cKm*kiaTvuhSgK zQaUoL)&!wss`;%Gbd!5`%fbfDl4ROKh^UjISX`Wp?75OmlI9hCFa&+UJgZCrNG(b% zFcQHQeD~TI;$I}%6c>)+YSL}?F1e8jQIPGA=9Z}R7A~jsTxlUygpXl~QsG9JL&%_4 zL0!AmLu9%4h9U(Kc~l8ne;c<4>HR(3^}8z5`K%#DkVDae*NmZGrLv-?Ia7R4W-LD= z0t1h5rMYw^e%Tl2Ajk(;m;>vJ*?y|zJ+AA_zW@QxtN`X5&aTr9_6!tmpov1AfdS0c z2&^0eMJp7B07BWDW_Buh$;Exh z2XPUN14ze}mR>W?AKCz|cYavRu8&67yp5oYj$EmbQ3}gfR!BsV9jLTNK`o=M^hKys1P!B%~7C{|(+fbp^GE zFeRe=hc~z^{_zHke!cMrEgB?BxS_3O(j9Mrq7{(e4lTbY?|JP>)e36UnkIL5|;+_7tm6TjZd#@<0{$XG1x=(4z&F;AAUD zn=Zm*#!hF5Ie=ITDR@35-U1JIGz4I=SQIN?K!JS(!%>q9=#;jr+ z<@hwukvWYbaTgB$^)rsAH@e`ZJcugQwha(uELEh2B&hV>SjvphKfm|7O)>_W5<(XW zgmGLsFgaJZ@k`N(*VV~M6h^9q)ZcBRP}~+}zK6oy@nMvCmwn&P@dN*VcQ+aq~d`Qoh63Sc5R z1x%{Hu$F7b{IH%!AQ|XbM+C=Fqz3dGJ2xl0e7j8Hvb6NiJ9;feO_6D_1$$#~7)tKG zv^@K~CyGsQ0q(hLh(^m59x#UuD}$Do+H@5> zxqQ*FD9B|O!Gc9k_6*m8{d`22ROm+Q1suoRx&av_-{H2pDCScGfZ#W_TEwGc_`t1?R7x=GG=x_7kYx&sC&S;_XBK$b`xRV-xidHVg3WVV7Ef3(2rYi1Gy)4Aj(7#_Y5(MhcVcu;!qMGjaiT z64IvNlxV^rhFen$5teRZ$BrUsd6&&?X;WM)xkrPf0t;i|ODw9V0?0NAjXL0xvT`|+ zrcAfV8=Yg!7Q-^ftVrVLLapbALBIU5;69tCv(=@~k5&nbV6EqD%XyPNRPvuyl$^wr z2OF2%%vmmVNqIx{i}R~2$(eOo$s<2#Y zhk#dJe{nr7Hm)9@$>zDkmhQcs~)xaW1f)u`*DBS*Y`X4ak+;7ujidC)1wE^ z^uFHL$Km+)w|lmyx`Sy1`Zk~U>&0p0b=TW{2|3th6$b3x4Id<9i|cK;OK5Pd0I)Al z7(Ll6Ji;uA0zf-#c?Z#qUx$;3Y%AN6tUG-OjSe1JPyn8DGyU@4#*Mu80) zHmtqYRC0s8FPiK`;&21c6`as;aho@a1>DtCah!R$WD&l8^bwzEyZ|zefx+nZ3gS@6 z;wHIB!#N!0P*g)*?GEiu2q?!rjAh#=lOCVv2e=n@|?*$dpfTbLI(f1Y_&$2DSQ z+xukr4jsudPZ5p40e!>D&Uj|p>AQp6Fhpj(3^iQS#acu-BiC9(INI7ehO%>j7>1S` zKQn`QLz(G))I$|O{TLb>=n`$=aJg43LuiQP6Ke&fW2QZ=G5k$X)ouebf2#Wt1(f1n zxqCWy9cZ0W$03z=@lbJ(9Xfw8guOxYBwjc!$KFky5*K~}&C7)A4V;>L_51Dh3&xey zQ1s1Ifitw==i1UAQDfb 
zd#CElvJY;X*f#&RppfCp7jMmM7fO?J?Z5`OXMAfbE_1X?qf(Lz&Yw=nwlm9%Z*R2* zTY9}mCbiCqU-U+AZ*K&hGD}?oW{KH%rOdhZm+{kS3Z4WDn>f<8AyD8|oEPsh*I01~wNV2z+h`HHJAi@LPdNaWs;3hsw*zEK zv1i=hEp3`Ur0t9Vj}%x@m}ndr2Z5uN6eO!f=W79gRfdz_+D0k?flu28R+tm;BNZSVWTP-vJ*m8_t^7@B$ZrP_dwG(Sx{a@32F* zgoD@$sYO;#^EHK9%yM~Dldl3px*sM7oTQrM#U#H|owa=b09d%Y>6#TtB;)cCzLYWL0p_2v(iJxi!_ zjtX5fS!(rCRl8)KjyI@}C}5_F1pS$}wJ-1f;Y~NXMte6jQY*ts7!57GJ)fRx?+j|2 zwyL|Y1%f0Sd{L=J(YCQ7SgdW4+CAH~bLOVvQmEu?nX5BKuZ-Oj-sW80zf*PfxqQ>} z&)J(#Zv|I^Pp4w3N0HKaMbnv1%_BqAM(rI*dSNrbJ(X_e_doYd|Ffq3Zh*Z?^b@C{ zZ~y>c{}qj$oIR{foc>YAKG4{<+v7m+;XnNfrMPB+Bt=0m__Ze1GPkuQbt(C7833%B zjlo$A{k#49bI29wNL)h5K&|2&SQh8^N3_`yb}a8ltyJk_h=-Uu*2-42l@>?zN>X7% z&(}Mt^`kX4GDSAgm=ey4Rn{uLSob&E#6ZU0G3QVp5TXUziW4Ck@bG9pr<{UoKN?3c zJ>xzk(#@EeT=NcwEHfv?AGvjx4!HmcODo7gl5%1a!4H=I&T+gVU_Ta4DPo2PlVY#{`r$wP%>m~0^J01~J-6;kxJ4+G~L z_k~}e6$H&R+1ieS9kO_I2l+vw67dFp>5S=;A1QHZTh>pCNS#p@U2zINluE^f`zZPA zO0{ylP-_K!uw>IUDRHZwT7RZallkL@RKImZSojPWb0ZqbsEZs26N$ZgfHBHaY%<7W zr-`#gg$T?6-LnLtQ~Ua8(EH&kjgWJV{pya`VvbqTU1ta5uQX#1dF0rDZ2f!KMOwU?TSDhiXjq83XdCcGAmfVO4~Mfs(GEm`6o>@Vgo zdMLmt$|qd)1r-^fRKYDkhp>EBlicyw*7MPa>Yy*Kdyk3ST`ENqnYlW7f9C>KC8|ddf0Tm zpzbLFG%cF*r$@RB1N^U|ykdnaCKK!Jp@^fF>*u72IgoK1I(;SJE_*V25*5;dmd0XN zO*Q37J`iIk-l|{PW%eD~yM~P+G%fS5Vr?*^HsLht3;Xqr2Zo&sWVK}};PqCL)WTHM zVP2D#R&~FqJC#W~LB{F6;{av) zNx=9UCgJ02l~6Uker9WW4I0D?(fg8GGx}m4006&Q$fT^MI9}l@=*Ax*(vZ)oexqK` z>cj5i1U+;)$QcLTq11~bU#k|XZgH6@wVkW2e>*Kz*BP-hRIH)niKpbgXUXlS!nAiF z0R;e*r?yf(geXf367{FkdbC4;r}~ZtFie+E`3J+4_%%!xNP?NaNI`TvG`javC*NcD z(?Vwfw>I6WcT2r~!gS=~Q%x9^Q&Ar(hqSV#2&;Ld5{~T%X>M#Ppq$q1&glSE3+^?! z#SJrCrz4S5bXki5xWxCAXO2dx(+7fFE{7b9Fo8h97p$$7HL?AQAcbcGeuwA4-%l#6 z3E`Ga34%%(8!IS*ql#>&#lP1;lV6w_#Hdw+28=ni)VeII5zuWVCssDY?(sjObcO86r{jFktHyM{; z5U3{#<*OY-CC655=6){P!8~(Pf2}HmZ2sbZJ<^*karJbLby-bhYh^WSvZY0)trzP; z1XC;97+&3|F}0ah$BQ-jUHu_rO>P*l4<)awO;T;2Z%Dz;X_AmE55wVW zg2vksoFS&3P+|!wkg%sF=;c?d@elhl?X|$2m4`q&cc%HK?3o0efYF<Z%*bK*vh0=pmi7XA_8X?e;DjKapKUI<;z6x@o70OH*BH@s8iMIO% z#z!=pMZD+Xn|mxq-tbC9NxsCGI%C>)99J?>? 
znd{v)Ssw>mFGnqaZ_w+Z)}hG5yYDW(5cS8om^k4JuHB6`sXHN&L1Tb!%cb67FvEis z$5G2Y^il|o)&(9j{5=c(4M`LV8?R;Z;km?LrW(DKcFw+Fw!K*2Z>Tvl-IitB?<6&p z^YADT&Q_Y9#<_Srdhj1n4iu`g1PrjvUO{nzoIG-J(j4vBzq(sksVjwC;a(`&vAgGt}Y(h}$PY@G7?l1d^cT-5Unu+ba#jDwb zvUT6P=uocevq5iaJ9#^vR@Nq8+Eb{WFX{rlL28oq-dT$7en0T@>e!BpZ#t=5ts0U4 z*+aN-rxY$u_B6S4U9YgZxjEuZ@$G%P_sBb2qeABQ+m_SjMLQAr#w2*Qt8UcRoF1G4 z)!*H9+!_vF|2V z=(e(Tnbt#XVZTo+v0tP{__I8&%*s&Q%9Mv`&L{U!9m|}pSCJSd~-t47N0peaMqCdM|Toi7)Qto z%Fz5>S+1ZZY!<{3RK*(1aXbIlBYqmx71Cd+vL#5<0TrTq2v9i!!W{vNhWIb;_V_U* zPR2beqG$oALGpMAgDKgUXl(zo!jhOTPzmiYx4iuxVDeN-J&V5V9WlYER$#ZZVvZ{| zU|jt8j|Kf?g0EY57x`}NT@JahVhIt9s31u*-{7}f6iOSS2A0U8FJub=NAQh+93cr3 zAkdO{2pp)LPXb|1CQ*R-+ayBQ(wiu3&sGbFXyhpy-{p;Icw{g<@j_!xbR|U}AL4$H7`9nC%lrrges3uX9|o zo_8_C$ZcOOJt!PAbgqib$%}c#a^?;uca2(VJJZVo6w7F`Nl!OVpS;Tc6vVwufIh*| zl*_GI*-@F6aWZ6T;n8p{)?LP5qSlVykg!8c#NTPytF-WxzNblPoDhXt@m~wzS`=59 z-PJLt--3nEpwQ__YD0xm)Yx*+pm(Xn?RFy7$~x-Hg)g#C?lWJ%P4++C{U;xUyY(hL z`O^HtRLdsJ={hc6f(Ml($xz`y-R;h!KyRlr_5-cA8>zMvRz0XLIu)5Ce3KZjQdp%fr5pVpUc$z0|lTt9&2-V^Y0miJyL z$(2CHOEo{F`4Vt+Ea8_QH}NPv4uN~_38g07!_GRstRxrMW7{u^rOnu4gHzsDdaW;u*>X?GQFA*u%YKj-^DywtXlv`>Hzfys zwiC(X+d)xV61R0#Z>c>$wiBAIkT>{qVN$((35N%V8oYU~l0mT0YoS2ly~;b2Km1}E z-EVMSg&$kO!L+)xZzjh3chxBJ(dmdo$Z}pgXg;gIYBP1;pt|08Dz|nj&mB6~p)Wuh zr&4ZL34~~98T-alT)_2QTWn-usT$Pk7Aw_l`CRF-?3|@18^~RE{C?iQq(0f$dvBx0 z=R&(B;r`lO5c>OC|K&f&;wZ~l@4r7WnCzz^llWh;*x1p)?H^&7UeD3Q+KK+(m+b$@ zbm;zZCn&ZTs*eFdH)3za2{_GZGi#)?9wKEND-2~Q z?xK|_7Gi1q5tEW#el-`39R30N`gY@(+yl zzkmF{PyPk8YHr%DbD(wU>2LebOtP(T)Zu=Unlshj&qK~w1QuUvqcw_%3sEQuE4gm; z0{pInuU`RQ@ecJ!@<{lE`;lcRmTiG#kc}J-CYDC=aDU0py~g4Hydq1rri+?v#Vn)UF z7~K%Uho&5+l52u&F%>)BrCPcxo&Vh~t(9l>4MwLsWh%b8wN<*T%UhuJRW}=1aK@&y z^nCJaZnvPre`ohnwhYbHsoVM6fgpe(%gZ1~lt)#zyl+ z5giRgC#JU#xx|T0(nm&0ao$G01{-KNVqA*K+~SabV(pW^g}5+g;qQz)otIOqW?V45 zy68=SzWhycHXwK;=zYj?a#~E z)%$Xt!pOCEi*$&~yLCQ1`gTX!KQtbn>Fc`dFt`Qc_QQNwfy5TaR4$|Ox29tPgyReF zh4dEe+Am!8+NQLUQOu0{NTTiE=0z)G?K(sqcRuCKW4|EAmFJW)Eb$_9nK?UPU?OtY 
zIPrzn8UQ~cWUN}IlbWT2*xv?Q%i>7)A8F%>&4I4kMJGhi!WDe_w7_%T0S#L(ChP4h zC%&>O<%BW#c1L$hl~ojVT4Ey`SSjt2Q8G|W>a`W2uw<*$48WK{95x2R)U{>Iq3sCe zQE=dS$(=I)jGGX!o2osX_Gbkxc1rHQKf zg@_{1PLM?^o#7bqUCZOH|BQssT;`xqHxUum>(trpZD-)_-~V2AASN=w;Zf;urhxvO zbH(5Y=snjitvMxJmly2k?4?gRI@$!i=;;Kx{Do(Jy(=NPt!wSG6El_UQXDm^dkMG- z4}+hZ29)d5EH4XrjR)c&UbC~CE)6=K#LNXb&s6onOKA#M9|}mG9!)y!Xz)>$8a!VA zyk%93zSfI=;iZlCCjr{_r2^izHsJ`buRCNzZN|frkWb6pY0|5){=&u5*&`XP$Y>bd zt{L;USnhP2@QZec*4LC4o%HUrlhhZ@eXLRSib@ZAMI{C4mZZ7`B6_FXhv!%1(%hSx znSD+XVoL8ZMBpsK(E}OUUF`D6vwAL|_hNjte_t^fGIN{pUU3g)`C<+m>mY{>WT3mQ z#I3%wOY@48eko{Tcn#yNV|;?&47}h}MS?8iV3w(*Dd9@}t$vX&04{OfCHI3zC?M<1 z)~4aGZW7c^gAyfSy3Ua@t(~+%aFBTZAZsn0(eInP-&z+c8Umml2Qi{}-fV{HCI*aI zP*;hzS#$v8zUk1vLeSk6A#CyJ#O)$=-X<~^N2=qO z6=)H9@u>R&Lgv}1_vJ1*kMbOh%2z7XNSiSSoxz^?VwON|2Lbm|+Eb>72$=T&P5_SQ z>Ovb$MTo65nB*DZKp6HkqFPZ{&T=(x0MM-2GMzpp|uLe}q4vUq+WubHT5w!y`T8!Hzx94Ohb{!L7gFdRuPfd`%mV!?vp z`nDsIL-4r%{yAH#NBZT9RLBoGXG9=DF{u*tp#1TB@{=vhMFJQo6C4n%M6j!kW*};~ zwDE&ijaCDZz1NNxfhSEm>cMnBdPP+gNJ-qVt-uUOJ0crGk%0V2OwslR*hK8Y5ck)L zh}C(RZ#FggLCv=Tx(l0YO;}3{QEaRx`*8%7!bo05xjpK7UB`4w>YLr|sjlhQCflD8Og8^i( z%o4-6Wv2mzlsiVaCnA$e3rUP3{Vxn$Xsag3N>^5F+Y3JhX!|{`eMp!q)%=t&9Fo!3 z$N38=k$nGiYI`{ZW6&)>A*j?!iV}T8nec?JUn;$^Bhsf#wbLPJJ-e4|4le|2TG~4n zm5j;qb-lLnc(yd=3KEFi7t!+Ll%XQ^q9{QsgDLGn5r$(!z{iqpl4wa!h6gAf_{28` zD3-Vc$Ql>tI0g=_aK2aw5Jn8dF9}da!^eEl#VSp7f7}n5P-GM%Zt|td%jo~~sHaGl zqQ1DO&R^9+y5$+L{k|TQ3_=Dwq=^EpBDkyN3N5y`hI+ppS8d;@jpwG8X-XtukC0Rd z30ePrpo%0xO0Do~Oj2;yT7uhaiup8M$h&m?0umrdn4vEa;t&=QJd*0yQC|f>jfSnw z9qvN-HI;VN;}b^qx#q)nH^WP-TO(8vKY?oAs6RmaleMf_Vp&^?6Usr)Ldw=6B3>V7 zf6pu}BgJ?DhN|Xl1m%^2Xo110J={$LB*z-uyW&jk*o^`}h?Sp~IY6wo=oM*S;aD#{ zNUqS;fo9$wS-&l*L1papO>4!p2{RtCvF} zW(VN3hw>DcfF_Vubq$f@v^(sm5jQlCOaoFKjdBM*$+3TIJLs-ol@8iduEtZ6^9-o@ z)QOJB^F?u6D5ZRW&KDCBbx=JO0dq{LVKhOvwldJp$){$`n+X%JZk5yH1)N1Bmg%rPyr=FQCveVQ@(f{QWv5szHRwPzlZv z1@?Jdu^>q$v^Hz}MOkVW`dp~NBG(L5AgUc(qBmwzbJZLTc&MV39PQ$hU1mTY>+$xJ zL&*z7>0AC*thE>2O8@lyouM_3nHX-I+?Ep2EimpMDz*?1>Vn!~qD%r^2ufty}NOb6M;J 
zun2d?gns#(`Zh`-BLnJ(S#J@{v(dtbOU_w35oI8Ub(hK za9aej4rf>#Ae}mtG$|}>oMzCgO~Pj^s9)iYtwF62p{b?q(AJ268|o**KN2z)(u=O6 z1dPhhe6xfJGW2%^I|8`=@aBi6N=va|hvmjbg&55ehxL>$B}Iio7=*>Ucx6|}6xoU7 zG~4KAsXq@j;8s=yTB~4PvMJ_G_+Bu{*?`j?HqS7ZN zD7Tm=kM>BfXqy%9^Pk%4{}U%H7&}s#|Jjzu|KuhF|Jt4#{osVo|CO2i^MZ-3{0Ez?=C$)oNhw} zHRIbP7-v~%sKkQVlGUE!!-pFXsR-Eu*rb`B2%zEJudADo-W05aK{LhVgn=FdP6{hyn+(E(MoB$3nk zSft_pMN1!16t;iNi}q#8bjavtm)KG;8(h@5k@4QX5XbGnV7QC9V!_5dc>LNc+tTe; zW;^=&lrM3_4=nC%3oJWJ^0lnX=y6*AF0(2WtTHLH?lfngbXmayey5y&$8EctRNNK} zZW4#u{k;GG+aMm7bF8sHBvfq3|7QaCPp0XG>ZWZb14>tJqnkiii9SP6o&YM4OmcZO z`Z}=s5DH6A{3TmidDpu&uw;4hoNCn^*(DigR{!hg)N8`xVb^sf_?YN;2uKe!y1Fw# zrDbRbh}XC38e(nGfN(H3+YAVg5+L3Dbgt@q#4vZflQbxXNF^Tfj6_>B?vGB_le%^- zEpAei>EB@`-aGD}Zj%zB=j{D9+mkTbl2#>O*2V`-f0Mdr@4dzJI981CkG3S|2RLiqQR zt`pD&_u+&hUGJ)f?tD%sq6YR1xXjb9R~%bxw3M7zWs63A8bHObFz|YEQ7!XfBa{8K zEo)SOe6?VQ$$qkdZH+vKAx7<~%R+JqF4OBEJjq3=prYXyN|J1j%*JM*nrlVJ;=wHx z>^b;+R(2ReBL1|CIXb_XM;N?7owm!lKr0SG4LbYJ+DV;tv71;BIKfPn59jOE3-pm` zFAcLn*)-`?T-nEM>@fRiXcXc^Bh7XSsfShgeKYCAK3s;8x!7w=1NP?;b(!ykPt;8v zbh~XHj=?)>+SYh+DqJhQffnL<&*V`j7EIz5B~7a!S-E9dPu$2Ehmm6Mzb_++vE+ln z!=qUM1-Be*j0!1x=yAUxOs7V?<-s|b07|k~A!i%40BP{fFZb0@vlO^bSMuC2LNI43 zd5|M?Dh$s7Cnd7~1h_Q>UW0Aj+4s<|_Hz3c&JsO*1MrGeYhK-_3xC6ULi7)U&kElp z;LYes!6T#?qzf<8v1QJQodc~Yq8+$4QUowj5Kibn<>JZt#(krD-ta}>!Bm}mv!2TT z$3O8b2osDUIRL=CI^chmL;mgN{(1Np0+IcLK&+%xUBlaN3DRP$pueXbeaF^ii&d&v zqI3O1RZ;4IpOO`jZY)67bdGOKToBvLCO-u~2|W=%CFk5eAR-lI>a2uH7XgB%7%m?0 zb&O^`!f~4D{`?_>Iy1neFT8@$*e7h$_D}grL-K7M5f8oj~ZJJ1WipBXrLwy8x+COl$VLe#6cMVtSvYGpywLV6D>I<8^WIxPI?wFs`Y5Si*<$*d#-x z+bb^j)$8qPbbD_;()#Jvgm0`t$+oYwHJvZ_SoD>MZYJHke0&tXxJp9lOLPh zVyuDT*dp#wJDBF%o5wSEnB8oDuVtM>eyG5m(S%X0aw3zTjMP`lG3M4nGpXiap@QGm zcyw}H`zxI^wcDO$@Y-vUl`|*utn_GcdcEN`KuuP+!|N+X%{IQX+y^^$>lUFFieAnI zYH^uhYis>H zm!5vnutdYFk1VswexJ>ipk~rg#pkai9VYnHkZ-3hPD#<4Hj5o=g-|8uuJGue0_IbOZ8Tb zU6V@@&-T5c`4@utCjb=KdnR?4p4j;1)< z7adIgdVM^Oo@gw@4iO~|YxA~2xBVIi&9c*^m87Fjw0O;@I2>YZR8db1|!HmVYYzGC2I%0HEUeV_c!uBHj!qM&)*5JKj@EUJB%=V|#}jvq 
z?Yf-WlNh)Xz`4QZQQXCPoUq7)6F=aOJC_OE{*uub`L;^UWr5zot5DBt=y+}dwcklK zUfo#*dvwM6)5!Sq!w4s@r>C*TTO`J&FXE4WpiytyJ|(|tt}aI#{8%Wp}_wIfv^OylS^D)A@a4wJ)Vl5=a3sNm8H7J=8zD+zl(Cr3KIjzTq z4Xh9vrqCc0{4%kjfkrPM=FngUgtdJjffhp+|&6Ie3@U~h^u46ZcPlsfS5&k-GUp06;)?V%;G&C_0i)B6lZEBR9Azr?+VSxnpY_(gIcqWXxPPH z&zIZGL)&mZ8!8Y0_34O)8ov?c4+{X?rl_Kc3>Lf+a+IUiTN(=vz#fmdRQ+=E-^Gt^>EZ-29ZBanEzK znsKk%RLkePp^7SyW#*Mo=tB}}4;#OED)z#%`S9Y{n)<|br9836s-enrG^T-8w4a~4 zrAQ(ltJyQ#=2h6xO2V2qF7chrOzStFb?_#f)*|)CsC9*I{l2yIAs3(3N(O=INW)-Q zquTat)$aj=gbd4&25=R9VG!#uzp|O_mW-yF(!!2hr~1I}3lDlEl-HrL)ON*93tyb37MQPWpwzgb+@`427*X!?R z`~nYqYmGn8pY*)sCjo3BFflCv#QBc3V=J>*;4o5vB+64nn|~5utm7 zh-EOrP+XtY#}OxUF?}~e z@n|n$fchDU*`W?c$_Q6c&Puafsv0+IK%P8WAk@Y~mW!_+6L3qeKMAyJoZM$KU?JEE zv}Lv6N-B3gBoMAXO67wZa1hy)Nc>=!M8MBlq_{Fi6Kt?kE<+*at43UBT$>FIiC=_( zr5a+tRK+C~JL7;jp5T``vDC=i?V0ng{0ibm@A)91EPQ`!5 zAAwS6vY&%BD@Vkbh=6Dl+m93n_1X+RnOF$v6L_%_>n}j|kF3+EuNr-1I9FnKmay$k zFLror+BeSDSfmdVt7ja4cIthW%Cvz2U;|w209ZR7{6dlSmrA}l%gP7MQA|f&KvixH zJ)zKC&~#h^NHJ-=F97>PL$yq4Jda4PKW8pJ5@P)tf)uhLK9cbYLy98yyLZ2UGWk#$ zty4pc+9V48|XD@=M- z=als;s*RFCeU3SDF{ZBHc<3=ge*;bpYtIso6e8vyhPv5OWXxz7( zBGsQ}G!;Gqg4uciX~fXN%oH8K4mi}Vm{^_ggK$lwcV47C>I6V&6dcGH=k4ps^=vy@Igbf>Pl-g=`FdBhbcCT9flv1f^bj>?HiG zuZF-l?f9uGYJ!+>WvF_{4{4%CV$GIn1ffu}fSP^q!LK!=K-j0=b)mmnLJ3A3UIx4Q zMV@Tn!Ml;u;r3f6g3d!!{w6)Y5$b}P13!X4o2q7K_~d1Vi5QI>YJq+=$-bWu3&_zy5@h0j2N1Cx*f2m$a{q#04KYbptFhegc*TWinE+-uXq1u&@_ zg>;>V2B^fdjhqtf)lSZcl%Fu6y7wenn=qJS!#PSIcQf2#TD8o1@qm(;Tm7LFg#*@p z>jgu*wmX@x=Njs~G`t-W@e;Q%qHlc;PL2k?f^Bsh`H;82{#lG04RH@zIk5E5vyZ=h zI?if(dGB=YOzJ*hvjOUSsG&8HdBpgn#DR(=F5?+3Q9r{jVEfX{iM_Lu6!|PaTMdfh z+QN29NH>LKq|YlQ{}+4j7$jP=Y>T#S+qP}nw(Zrnxf-i&yI0$`ZQHhW``i2C-4o}< z`~4#J?-?^9V^+U)`|N>0BPr*Yv9u?2#d^2_jxC zKqDpRa-+@AkIqtg6_{vx1}dE1o_qYdthU(H?RU`n74T9{J-&kA-E=*^2DMQ=Wz_2) z&?q1{I1Z1=7rNSq{3!JMt{))~?0A3BHnX8-cJrt}leTi)ME_^n9-GZmZM5BREGs5y zpBr{@h*~^vsn)?eOANIlsiG`YSE;$r!LZS{ig+*_oWNLh-jsE^EF0@SG42YVk93ny|Z)P1p#eNLvQWLFcy@mMG 
zGW3~`A@7mDC-50A(>wA4H~$&(BTkA9vdL7vLR#>^y_V*@Qhfwj{2r~P(mJM}+T{;p~5!;4bky6QKwZ%<)~az!y;a^=dpWiB@5y{`u!NCWnTeQE4|5f!#q zMr>$&y57{5K^g{bD+Q~cS%x0A#7@EnxR=0F)TTX(B`KPy(M_q1l<2FqaFI%+C8!l! zQMPB4R8|%9c1NeIj_k0Y#46*az?~Zd=oYiV)&~9!EBZ?z%3nmoIzLzhqO4N9Cx5)> z0SS@}lBX)MYqp}v!##Kz`ch{UC;et%a$^gRSZA2l>%t4G{fpB$Q4itDdXxb?7#qPZ z2U9D=VeY5+gHYf`00GF%>pe+)Dx9@#Z8pv%RI}isz%!RyeNsn!z zfm`m?A2cIq9~aPP-@qsl`$Dj*KtmItQ&WOl_A{44jIjA!;J*i`KsMK2fk2b^>VF)R z{}*!LT6_tgm`}qrJzE?Aih^o*{+eite7Ck%B3Z9hEFpF-I!WZ2DB8khzAYhi1+iWP6S6&mK+!!TL@Qi4I#3jI(R1Z(-wv%zXg4f*5goA#bw0ue21i4K`L$E+K$p7McwGs)_tF5Y787sN zc;UPSKeN-ec#dW*Y1w5a$UDOGgz^tu#`jYZXwteqNDjKgSODOYm!7)f)KAks%Qn`JDgVNs4$}kN=ZN~|OOx5;|DBK0{QwQg{RqiTW=uZE z#3e3C;7Xz%XP_uOhq+zZZ9zEJXbx#7i6$H+ukQe`R|e5M)NIhKK^ahEi^`-1S$MTE zD({{!o-78zEVBeAcC8l!d^05kcrClOEE~7X{Tn0&eqLZ8NOuPt;BVkAI!>S8*nZB?#`SvL$zbZF|8faI|J|4XC;L!b-#) z1XY6Jm8-_#X{Q(0E}R6d+<@cgAIa7z7D5 z)&-~JwJvAHv!WHH4C`n62?KDOk?Wdu|A{ib$2a%d`VksG*WSMi*oIh75Z|Om{v<)m z+&K{?BkC=P3)mK4%}HfO!0NdMC1Hs-Rsswx6Nt@3C3-zsQXa^bX$CCz=ADHXJwvFY z=>b-ol8|K}TnU8(Fk(!NslD7N_X#010t8tf6^p|nTD7zXV6ZmPkl%f*CY=tzDg>_+ zn?ZuGPR5tXf#QiJMiY8Sf{qzzx}b(tZry+| z>&_qwiNEM7AP}VU33)H<)&<~DgwPnsZtcobrE_1QpHliDi7()?u7DJ6t~Ibu&>x{5 z^W=Va<@YKsJ7!(8t;Ww6M(Wq=xv_ra$M10f=m&%uETkE)HU`02^BHqe2y3$!tL!b( z*Aeoqn?31qk+fXc%f+y#vwHUB)j_>xb@D)sA|Tb$DtsgfRRB>JJ#o|WXTZoE8M zqF6G0(Y8LqCUN$RGl((+ahC{P7F=hBi-q+zh3i;HmaJHYfMgCU>S-GlMg|$r?b(ii zqrW#S?VD_#qkx>ysbvUavGc>M+T>A$RgzzE5H&A3Y57E-Hi8sJDj!vr$`ZCata95U)wS5vf_VLEr?XkS;`9&Sg?Pa?z|sRr1HW zU?KoT7of_Fr|Rol=LW0NSx$U!;vg!F3SZ%~4Q%J)>=0E+BEe+Kd%pQew<1-E8{cR z_jCaF@GN9?8M&78<>H%xClw@SgnEQi3Aat1a*rtK{^013fH2fIvkx?3dXTie>swWP zc@V1QC6{FqxRyi|myAg7k}IN=@C5||Lv5GHo}?EL)Zrq-lfo?bcFNZEMKD-hgPc8o zdGX0+bvb-3a8VVIB8}3|43?|XtbjQ5NW})=0WJ7E)5ukeXD9Vr=OTk7rJO-(0ah6L(kYg=Xr3qP4l%ZY|q-D5w zB4!U-T5C8-cm-Ar>NQT~DVO#V-G)q!B19fFLe@ zb`F}IcHjX^7})u@_8YhX{ir$^DJiPojjnoo1|$Cf1h=zc5y~ zREJkw)n5*T8^&3}F--!;S%ya5l~L%(H+l;Cj|5)96qZ1|8+w)j3u{CM@8>IM{n=;9 
zZ?vgBS&-5}h&)kl7ZVR};-!)04)HfMC6dbz{v+x*Hc{~(8b3c}3kD8UH(zLr!RYZntUPT^Y|SpFfTq^OxxjTGl4^UQT6bYQdI z_;VUB^70|vMqjdk8%e>D-zQ2t1*_<-S@UGxWbe1i^NR$1itvsh-t4pli2(u0gIp!u z><5`-ic`x!CIA;DzjcjD2?dMvAeA*KqRn}bgKwB2_lY0CMln`6wK*57#Idzpk3b8l zb4C9Oi$tEZrp~anAQ0O`U@oWiw|7@>*R@1G139i(YHBd7A-3GhW&JxeHyAhOmpPJ~ zv7*>4hlGU}t|IoE&&OND5g-@O*EpJ2tUrbujdMMe_GkZkod^rdx= zsq>i9+EAQ{YhpLiG9pye&9k-eA$eTh0)C$5LFj_=pSKFsU?G{=xyariT8@; zpLh-QN1j7Ovw-hlN8DOQz~NC2er~#`uMkY}?!uFV&f#ytPhY&!2lY6!1v51AR_A}8 zmY^!QT+mwzT4@o*xfX0Ao|@+a;<@{!GA2?ji3N+kv*hE~fU`G1ig1CI%8Lb$IKQ`8!kN<9_tuBewkb?1AM~Kq4&RU>iN;tx>`s zTtMVgxUl3(z1eneZz;<4cLWo+Hkjs@4f^w1xsB?xKlYXWJFR!z%>Y4+qO28td1Uv^tWLdX+>Y}Q{Cx|E`n;@AG%`BR~@KD*}P|FDBsX4T*@bEN4^YpC?zY;2*o2g{MZMDaYWSCCAIBW%3_s4`P0ZwzoxxlDse*P8cdiL<`noC zYb4g!(ufz9DkKFNEGjALi1h9bnY%GYcSc#U>isAdO0j~`asBT*dU5$!jIM0FGOVqz zGPkJO8-QPJ4vU1Tsj3X(AkLS9d5Nw__cG%)=995lXlk%!?b<5yzXG~I0U)8Iz^+{( ztblR}8MzR9XAZM&@tPTI&xwShYw#@xgt;N4aMp;!#>@GUrATG{25w=RScmb5)3ViZ2<4RBwx>G zlenJ=M-Y|1OqsZs68RnC5y?7Akz$FJ=>aF3_i=lDcJs}N+U2D2reFd0FCTX$-fJC*nCACWaLW|yx3ov4R2mV8bYB3fZB6I_nqol`fuuDm&ABe?7QmhiCKK}&ax8vX=ppCkZDh<*8&9jOq!JM z#!wvWF7X@?=&};I9m+??OIy6;d^xD)U4&PpudcbC{w*B zhl=m^aS>R)(<=|BTlzdsTz2WlcI~x4xx>`3b9Z)m&J@m_WoPzl@6P~k5#~`+zW_0! z&d!he*04It+6a26|3pAk--LxREpir!7HQ?Y3$EELB=tEId{5L!(GMFca)Z zL-z>>R0qgssUx#-b5>)0fD{+?piQkO)j;IanakeMK9@*&`*Qs9T*G9YuRcN4 zAAx^-ue#a6)zqB(v*XKE_dm2NTzh|B+eN-SRX=|O0;TUjD|Qm6s4TEr|=$uaZY>(yKzZ88f_x5ZWE1P&L8(UXcv#--S>k?)bM4ZqT!20 zUF!FhT?DBnWp(crQGE||3R(1Y9NGUN$Lz(ksQ=}Wd8|D-kL*NNG57B~cVQ4S-g<;A|54fsdsls656@No}tOzb!E}e8D~i z*_K`w!SQx3KkrJ#hH)fp&H5cC!*rDfUkPf0mkO_npGAK7PEvL(fPNOsm0g!Pu`!P6 z>W%?;oxwt#^CNMtnArg3Qt7LGm*`1$e<<-6<_)(u&j>Z;?ql-;f0_zj$oAFj_l&(eqm zOJ?H#-1~j65T2>WGv*T#%I#?AexJ8?Sp1uem2h$A#3w6f=JAo8djz>8(i?G_DMN}! 
zg-N2H?a628g-{3+bA^Pe{(8T`gt=J_cMO37Y8se2P zP*38QZE6~?e(*rnB^3^?JBcnT`r~tz3wzISC$o>}cyOiI1+Cei8X9-f2;vb2QEDd^S8%%XMBbk`IrPo@b`vYi4-YT3diGH^ zCcY8Hvgo_R6In=dtCFHA$XjV1pMyNykK9bzCo2#Al{sO+$Zp!O5F<5vf zH1-kkkZ)vfV%>}jE6h@^Z6+nCH@KV^XvCg}ewn{Hw1x4zO%7$WaL!Y2Y#_~HxmvB+ z86va0qoDl-fj_peotE55G)AN&*pkST&3sTn^3GQ}MO1r(ln$S;Jpe?u6y`-lKt!M~ zfV?Xf>QXU1;)OW80rB`{*`oc)-XBj8>vPm&m(qEq3h%K7xArO59m}KX%S6ZD%i~|B zl>a=}+ZpO;Wt_Az$%0TcHA1;aY~lUz zDUxI$o9One{hJP-jZ0p<;oD|71dJ?h(ZwCqZanLeWGb5n!1@@Cp~(#?{)a1-WKI$v zX35lY02oGA~5SR)WOr_`q8Z+O{)s*7vK1&Ko5+^73JlW|K*u4rmvG*DqFXt(X#vI5F z`4_?YeOFNn8}wkN-vI1D{WSNZ{T4?$Q#ozIHn6^06AhY;Cw3!>29pkj=p2sQ_GsjL z7YN!S8hb3=j$VrRe(xKG?hw+|i4flW?FxI1m$L@Abc=QPJ|S+{E9^IV;LwF}>nmiU z0|rS)?8`7>*{^(3jD9N@0q6+IIlQKsIx?9{?7gG-sYmRJRJcCSY-KJaKa~dt3bw=> z=v(akuqe`A!ash%_Xw9n4E0dyCPMD+WSzOFN$O1tjD|uvI^?}KvR#hWW!?_24obLx z>hLr>r4c0~xE`xE&Uq^%vrlj6i!MXws4UKYo!R*@@XpmWzpX5he@gr>Y**pu}Yij%~cG3Z|O$Yy5e0mIOm4_ER1vO=7y(V-F zpw{CCGC7*%Lsv^A!gj(XEmxG=Sl8ULZBO##KB{j7k3Tap@;WT8KFRqo5PKBUW~xfO z*>0F{NUqcT-$NG;N=fPG57F4}$5;#X|C*XGwzsi&`t>jDDohZx`g9ev?|$_op`TTkNZ%mCX_SByTYuTHUJCs& zK7U?;0_J0d!u5zJ6lKaRRH1O+-_c&BrLXJQSqX&Z4f!C&AbiMV$7cuYj_QB!k8VC zi=y5PQbs^o&9FvS@k^7pnt~mp$)4)}P7e*&o_-&kFIZB_R&?~4SCs$VEru79BahLY zy5rHl?fV3E7V)$^(*G2j?tHL)-N^MX)1m(g^`9jZPq#nwCsKC*73%+m`2U_C(A{)e zA42WAme~$CbSFc{6nYQa1RYiq%O_fn&Ma+;cl87h%ngPD!2oC|GI3faKLvgue53G* z_iYEDh)04^B-52HGN*}$HmL1BUcb+(H`n+5#T&tW-A@xcXVNW=J9il@85fg(VATD- z|0*f(#_h37PRuWt;Lz=be6(o~=lk8BBFNwJP`|gi0N)44Bzfmsv)xG(Y3XNA4^P)0 zk;8q;BI972X@=b0GksxXpZJO&_e!#596Z9CA=y0E<0k@!%rA9zTpz}h6Em!cfLGFM zgG45(8Ap8q8>yOcCJvMtl}x1 zr*x3q*OfKnNnwMMiu3ngX-n_|KiOhRMFkdu{vb3~+Vqejd}XKd(w6qBv{x z%SZNTpef+=pI;2Po0+CrG_*40&Ucm$NTwc+XUBtSIffgvRcWAu@Gs0U^YHJR3^9yo zM8CljV$>w+_1m)G)A%>9kNBrQXf9koNCcR){*IjC2LW}Ll@)Bq-tT3VwS6+OQqI>; zc>nIZ)lMY)25p84yo0eVBYJmh+LmQloM;%&rk?+naab)3&*jxExxM#o_&|G~RWNmyNs2|O6RwPiU6-aC6RLK@KYVuIKs;L97yKg-}C`d=o)_EM5BOa~5nvI$9$WPH+O? 
z?NHv8btWn@Q!aF z{LF^tqONXQz=6SvdG`EcT-xT0)5yA2SgtoJ85){C@{45lOL z=KE|rnm~=Nc|+bU_iF9UcQPUFH%EEcj4k>K?rA#WEoAjh$wZM(ieBUT+}fFP&y#6< zf7`z$YGb#5cGVu~$f7N$P1rm}WH_m~G4Y|Zew!a77wc5O&ndbKWIExjEVj#PW+}i1 z`EyqtBT|*AWh(DUaiAPF_bmA9fjA|S>XiFm2B(5cgv&$B79U`a6u)bKukFFHc!#?N8T?39;d^Aes$Bd^2dWoe)J0OM)q#D2s+1`$2_^B{LaXZe`O!6n zQWY&Vw8?Gfjp6vYmfqEhP82AZD&#})HHA}U?^9J0wl%t@71k;0EkVPK*i#mXw^sCw z15jZdI`Wc~`VTy(-X-3po?BKUhMqNonZgC!kY$q>vDsT8?Xi|J93qVJ^c9gL3O=!S z<%^-BNVNe8fZq))mWpGSD@+@LxI6_1=>PqVt*=N7eF#UOqL`vYCA(kMuz)Ov)~0Dv zJLdwaS)SM!ci2u#G`=a?Tl|2kSIZ)uE-T5#lj^&2O5J=uk<~RHRvDyRx!y%1q#(fc{Kv_Viv9^hHILWE0zsIEgTl_z~GCR14pYH;9q z+9m@KfH`RFmfR(;YHgS?h(@iSQd1ks`a z;zu5V(L%iA7WG#r=-Q9d5@?o)jxZ(b*exjA@3u8!El;SHWr@?tMYO}7n@d5`cmcF% zaqZd=4i)!Aq`}*~0cIg6VKMu-?YcTANXqg=m)A;L!a+4mI=3(s?!uP|3Byua0??n; zMu7=M2C@w_B&8!v%YdxFqQpqvpV83C6Qokapw+%4JAy1KgN6juKKP%GxJwYhnz*Kw(wtPtt?Y z&{wZWJz&<`7dcK^Q>wI7^tTBDZJoIC0FK7h;5MzduSOLB+S>+^ru5t&tIr9a41{LF zwBUD^HEL)?z%C%0Z7;{gRHcR>>ckxb8#RyV1+ZNOXE3z57g#Ka4CIS27G-IxmZ1e* zkwN*40{}rZk5)8FCdbg0P$~$u84keOBGhyMrY2(OvqZy6`QRZWHv7gTQAkpfw1i^O zUul5|S_B1+J?~CiKbrH#_7fayZ)7Wg*KqWUskW~AD*h4Cn_52Ye3P<`%UYF0ENB15 z-baCT?}0epFy!49gEE4k0Qeb=HC1?xG)`oO5o<7F=H!b?2O!Zt;382SC~L5WAlF6U zA*UH}00NF0uk;$Tr7|!S3f&w0erbaqc8Ud^Sgl6}Js^g63t5OA#D}H&S)K#4+d?7{ z^o^RI#j`B0+#OBkt#x&;JK*lT2h@8Y>9&oYo;Jng0ZeHWAZkg{*xrgOq%{8n4f+w@6(2sCv@M!EmD@ci&kvNC)-^?t zuRnCJ?T*69cXjZRh8|O$R@t7B&jnmls{o`{us#_z^g{-OVDq3~j8-Z6x4B;Hw+Qz# z5D^9hz-8O9buXwn!|?>v0rt$H=&3t&TDg1=7Gv4)2AojN22IbU(zGzPxnv@7JY z^-4BR{+_~cP3m!x$&-MATBBnAVMft)YVzDlg+>1>p2K#jk*~tZFgKj4nf-OwmbKnd zIJvG7+DJP|w^7I`PhwlK6t|N0Gx8o`RQfo*`SzC(?o?HMEmE9B?cJkFE_mKU@3 zZLrW}U{FA?crCf$FfVa4jeY+*T!qha)H_^7p$lfUE#X))ZoO-Gop2&OyduoJDw3YI z2nvK6gCYZ?S0aQ0zgjlRKQK8Mek2N?Yx5O(ECp|D_ZdX%Tq5DmA-xiFG z#IjfmUk^S;T)u$2V4aY6zsRPQa6qFFBZkNg5PIl`nU_ap0fdcsSQB7nPz%F60g5E` z@gS~0jw`X?#MF#1og%@kwt zu*w-m{n<4PI5rv>TboX@`1QwFfAX8iVEF5!=HxK0^3i>3XQ2fTFbu1mMp zS6|EtF>>~M)1x{^a!m~c?VOR)z33>B7eOEw>pdzR z1Gu+qE{#wpF@E$r*xBDl)@6ZLegmFYN07xv_Qq%NjBmF(mtslqsRVtqa 
zwZ4L)o}@#G(Cf|S4HzM&9_v94P2ZOfN}oMtn6eePAAjh#bPuTM+zz4c&2&5@$KQQM z7wSKjz331!{p#>CZL`;e}=lxs0V7U^BWC@{#h8iuZ^>W=LWIyqmM-)U6i6Tx?3L& z#8G+sWm&ZgnmxwmVz0>RZQ0xsm71R}`ZavZfr*l<*-+-Wy;?|Q;n{dVM&8__Y@VVO zN?bMZDVP8DJKOA7(cuxy@os#=_QM4m7*o`;OJj%d;jRM&5=c3_#FX(0c41Rc#{*u( zvKcv*;m#ODy?1#OC1f-KIyf$t9$|Y^B*9OH#Ld%>#O)GTF~)vN0MFl`Unyg3Mk~ekW{!wiU9pUp*sT zeK6xr+5xvzQDR3cAxZ=4im&?zr_&A%cbiaK#45YPPu=LHIo@}@a!$Ni1p<7sPMdT_ ze>K|momQpA4Q~kt@`%*xc5UGA3$%Vu3{I6~_XlV;IiCgU;;-GKUo^kQ=Ob}`mxu>k z5a5&uCx(I2;>p)fK@!}0o21=7DJOo%4J4r{Ri26~^6~ab5~;r$h8!tCmy6OQ#uyh2 zryBpMA<+lie{+Zn%XDY#u;94dd##MXcY2zia}>6QXtG2j9puhr2iY8@Rz4ttc<^LljDA?P*1JeLSZq#XdS=6nEj~$QbiYSFX7TxpTaaa?4{*(;36R@3^U3 zZQv2II6O4tW#b3B+UfppC$#@Rs=@y*5om4xryBg9YVd!m!T+fS|Np5P{4ebKPc;|- z3;^N>`UwGM(bJG)eiUVJ{@)d4nE%n&xlmKI|MArF&1v}d|B$Qg6%YuZB4JQ1Z{xG! z?_ny=1&py-D=dH8HpjM5YD{aANMf_3A9Wn|aqGJNARQm~S=NG=83}!c6hKGN_*>Fp z^Y$-EnH6Y*(SQe1OOI)!n-P*08Gb*lD`~{*12GM#+>}~R83A6H^%X~wN=@+t(Cs=s zQEECarc!aokWsO|JthaR<>g3 zChcrmC~g%yoSq;TdNjz9UHobTc$ulcS+x-=1;Dc?2kO6T@jio!93;d^uUK8 zzKI^D%vAL0IpjNmIHGh8z$G1fx&_*thoS{KPnG^1-`@kRht%1?Fx!}O##{(Rle+aAU3&J` zPl^4^Zm$PvHB>m6VS?JbUtukl94tBrdK}-BTTx^@i`l~V#q6#+zk=+N2@ew0p=Zb1 z{uU~%Y|LuMFr|Pn0NG`Bm@6`F%juq^90Hvo&t7TuU`9s5kG)Bs$5%2=oAh{Zy6xs) z@gZHf`=fz7yhbjtSttHk)+3{jhMVR5gjLFEc_!G+orcX6=lJF4eOP*=d@E>dEGsg@ zT(us~0{F3Xi9fpG;JIDvi$N+-vvd*sH=`0HIsEY{q5?_wFK*UK%`+1WbH1@B8QQv- z;g6n-OlJY;@fdK7_jMwVdlYi>g4=B&g%sCR5;%(BcQD;+$pPRV!vnt4FGUY`riu5{CofYbTkSi(mS`qeZVXG5bLq$4Uppb#n+MJZ5CB*T$&4X7O`vD zQ85TJXhEz5=-SDT?6OVp4DgKbOtVeo&4A)bjZI)S51rnGk`W5yN&MupuivMhKXdHR zXLraX>KP(p!$cNCg9x4yG%@sX;rV@2siqv_4)u8M^GV=h!rTPeKITWezt^9?{`g%M zy$;UZ2LVDw)DwTZyJsDb7xec}bi<+P6_bq<47uHt=(=6n>X^3^K&lnjWTlt?B| z{92Sp3(K#$D^|c@gs*7l5+t7N;323I183KB=2-|0yD&HyRNoUopvI>k=w;CytT$kj zoWqDU-v0>kV~M;Yh*n%6iTJh9W>Gz;N85EXH(gN_ct+=#kYb2(us5Q+e7o0c~+`=FRY#lI8u>vj*d0>8%`=i;N z%UFCIHE<(dg@FD#>^<2jFVjFeR1df3qu{?SyBvn_@;tGrsvTVZSmGZN3_0MwUv%TE 
zwgv+?)S9dLZlhgO#cSv*%R%l#Kk`ScHjl2e2_D~u)2W@5$DH;_FQO}vaW{XnfR`zr|_+g`70fz^*oI?T{2dHR0OYtk29$pN{x;k#> z+9i=lpV26t`vF+*WBXQZ8EeDq6P!a0>s#L<82wi-eHhHuC*;2zVl-0>gE63I!LOO1 z>UuNoW4B>_dA?u5zBy9yPA-yPM_X+64Ec=E>FyzyCOR}U2c~K70j&tgguhRY4rrIg zPjbF7e&3lewltR6W#7(@L3QlXwwm07YP*dE4ub2oS`8!@+}Exy-uuu@!cWwsioV=i zq)O$mt@Wo?O4}dvgQ=$m*+)J$x?gn-=Ulc$2WhkE*NFAE_fici-J{F$v)YEGMoLe~ zp<2D9L{`WyDu+b=NOl z5Cll4Z1)179UJB{XyI5zq0eYtVS+|(wkm^2j;&&-P*jDs^cE-|RY#U%H&US592IrN z@V$Ycq~28TxK@HqzU9?`4h%kz>V5doI0W5WGvvEMsK^fYT#6p{idtsiQ7(Rlk6;B8zmZe#s`T{&L()t^Bpm zx73<{kkUH!$8Nb5Xvgm2n1g@WC@TXG;$W#abZQT~4IyK&SQl4+t`?lC)oRX{UwMXZ z5Ugns?+>6utE(Pko6Z%Yz1#tw;uT`+=LP03r22-2-s}~;Gw+0vrIsr82+`T!U4|E% z#OniaW{2+wOX{`rCOTSS+_G-wC6?jD@@6XA4|PE77wp}|sHwABPN2!u4!y`@s2Wv0 z-OEtFqp8&lI$XPf)J413P*+Y|LFp!}vAqn|oIpcm6ST#$8^N=C4l>9XCo+J?XAD-o zEwK%!60ctpC4^6s0d=s1iU?7P_hi*Hv4jR=+~~yvM+4+m$kgq3C1xxjX(OBTugRWm zCfHh!!YK-B8C6-98_V+Ar!W_D?Br;Fmtaf(UG+>@Uq^ZmbZzU4a9y`mrmJ;|Na_-s zTMi#KayAUZyeCDg-lVTpovn6_U`>=uS?T*B_zq}Yy44ohdd~+vSyr`%yF-3GHh04d zX%h}GZm-bWVUJt7w?-#@bSiTz%qmLdw* zNIyebZm_xNqU~S3J3()H-izqs66U!?6F@b4h6PaN2xw*)IPU~3MK_H>M@)O}s}QN# z-`b63B9IDJAN4O?#zJ`X1Vs600Cc)p1NJW;vjb7Mc=iHOD9%eK<1wr4YX@V*DL@oh zZIiNDZ_trq8E3PtWSgiK5iT|@Q>b9DvG|2S=Uc|e5VxYjij(_u;28$J*2hj%&nGO` zDz?N!0@y{rflGHX^S zz42S|7XtmTM?BQc-8c>b?Z~OIuwP&Db(cg0+!!LoBpD@;1e0WvpcRtX9g^pq1P(kr zjrou@g2XPm?j_2D;>{-rQc!gZ2h%ZDUao1-dQg~?a1aI9%N(z7M;jBbUODY{DNwz{3FKM#_veJqd4vbTv#5Qn& z&V;H?hTTyKc58qPl!6yI>73`hWxL*kI!Spec76bS?*f2u~mDy8y)e=0TK z5i#^zwOjGOB{oBxqxI?NcoaC$vZUdBS9VpwB*GIS*C^%`RCSLMHN3|k?u`UvB3S`Pi zvoOHg%T^Yc1gzrh(y$6Q<+)kR-mbkO;wtPgCTxpAJ}aIBnpv#qP+{=vw(f)s^9#yD zno@vJ1>E$-+J3jXdbKPfH87s(J+E+V?m4e;LXVPiSjCWbe#ZgC7`sQ3Hbq&8x)iO9 zZUt=*10`;B)>=?ZRPwY(jqR^*x}mZ%c%h|}O<;C@L7PFf3_6+cw{EO7x_hHp)WQ=s-x?doBBQt}YA!!5N552y|F%^~CJ z&Dt{M5X3rcA5}anVQH6$Dm@f((%B>tbsl`+d)}lIseJ0}UuMj*6Zfq2oNYk%i{NcW zgHUP{8)407sJgwFkW>5<@LIp({thf)%_aeY+wS?-0+BHHOf7KBr<_D`05~L{D&&$v zCks`ey(TW8a+OTE0%n;7)a8R&nXv>?YoNfc&+e+5o^lZt1ee?x 
z>+MfGl&<{_5A1~ZR;L|Q52S{sdunm~0J#G3eJyrQTB~_U+l(BJ+iYpSQ-D5`vH{2* z)X6G|z&Fv1hw~-TBKBK!(Sj*Sgg)l7QQadnfr(pXDvWHINOsh}7A+5dt|Xd5GKJ`h zB>o+9KWUTL1+QPE>b*EVR+&e97`01Vf+Yk6|!^JkbU`Ys8e@-gAW}WTY zZPH~isN4$hE|{XtR5nMZx>hKqJw@W8E%~P&1~1V0#29dk!Utc%$;Qs zg$q~yPgc}PBxlpET@b=k*`nA?B|^&Tjk9Q=VRl$u^l<3SU*Bw-a)iuRjKebUNzC~V zW@(mHdo78hK_7hw9lCC(*Uc$4Dp}Aum6_Hu|DqQJ>~*$18?OPdq@wHk+QoP00f#&} z<#S2JS3LrS5=OZ69%GbzT3ICqhgj9Dp&L_ZxFWmCZA;WT8xDvrOZ4&&)FZyxL}n(S z$eK7)WJkqNApKa%MfuV2_<1y$9$-thALd*9mx4&OBs(TFg`!A2>AlWl!l2>Ys|h9h ztrPP12-z2_^j`;*UZuzfWQu+T5Ne;C%~w2%Jy@iS61@S>`}xK*1eU$4U9()f^>OyY zL$LE4UOcAN4x(iQ9KZ%~y2Ac8=)9MFuE{ZSl`weov@@*lZd?gLu27VAOj$`ULY>{w zHBkEA5QkmbyIauWUu7ZWG$PACH>_R~sXvekXS3`FNpTz^dk7K2VE7&5l_IbSz+nlP z-lP{B68edq;0e1Aym-k2jZ zpB(4}g8n&q?gfg~148V%K|to(;^L7IRYfdRMOxS|c?GI${pNdbzwvi8!!uWBA=nV{ zg!U33{I~-V*%AFw6vol~qX=z*(vY_*Pn?&f+Jei42~+zu=Y0r!LkQY{ZdD$XHNOozD$vB5 zKfX-F+%)L?N346dqb_s;{NPr+Eth?`_FnN{gF1C*Voz9|r&&eHdw66B|;r)#! zQ@w0SLYUA~Syd$Ypdt)3EVJE6uxMk+1OGd4IMG#*wTB3E*ZUQNE&W{Nq9(N#Kw(CW z2{s= z$HJ#jgt=C!EVfv?An~|SGIrgxQ+`W;0@ffn{1wH_hrt{kN<04jQ3|yKcpQ?eF=`#Z zcB=0hTqWt=<|5onGOK3x<+ob<5JN{lE~(@Kk-sE{V22$FlX~k@cC^sUfgxU)ZoFsY zp`k_mr4%1ohr$6*Q}hK_zg?!?C!&LgP(* z zqY2e?38wE*r%DN7L7I~kQdU{aEaQ1hDJa1>N5cO_7%5c#J_5=G>OwyO>cY*qBycyM zTd5Y+Ehl+pRR61T`4V!Fx6C}#8;d8xMbthZWA_C|nG}rl0ZZ8?8o48JVS*N^y&@P% zwR0kH1LTGL3JJX_1k6QM+1J;_65GmDv_rK(bEx@BiD(erboZ`LsY$%EXc}8-0Oo4p zU7syQn|e;0i+g* zO(MI9LE6K%`}+jo3?)Wq`ZAC3`j=dTOPw+d6AC{c31dKWM*x%fOAn5LUIdV@&tn7X zXVBAOr?}O&&kAE#s%KGgn1mq|)rlT5S70Pa>uCZptQv|vSZR5ym5`t+r#~#G4>ebR zHqyd$8ga`s=Z)2-RBdWZHn5A&k{tC>iUwH}`^fM&T_u|2Ihv($J1vU~1;Lc!U@Xq1 z*UcvxVRbwpEeuk@jBome9>dLH4(Y_AyI9}NoL`1etHB->zFEOo`^r>VWjvxXEFg2j zBmI6s&80s;)?51LDqsb4>*opOau^g~^~r=O;-Ez7*~rtw0~e!5Dn?qTp&tO+q?n#Q zL4BB*x_1Gq&EcM$bQRjQM+@u+NX^qckcT8lF=A7i5D!`6p)?SRNIcxL)_i0#8=rkh zJjC<0p&b$i%S3b)b6{=hAedEIu;o`_ue?%4;Ojx!Lgp7|NW{gv`td{2)gdfseX-;L>eR*OU#Z741L=2KIroa&>Tk^;e%_DJ_MFCB+Wg#yPK7o%rf76#5z2rfky< z18>vKAi!TE&wmcikw$es?Ck`p 
z*@lu_3KmFOCfX32HzpCawvK}85s(uX&JMDPjmLC}&>hbV!KO@ne3lxr?bt70oeZOx zSO^2x)_L|$`xUREh43J{`pm}o!!{l8FR?@MN7v`$LobqK;pH!wVlAqONS_o7ZrZr(IaflR0~GEF zn|dnV?YLFV@dhixSrWIKNbGetGo5(vyc9{5h&sa`B$OmBCnmafPC6{D@X*II(9r+- zohq8AM(~T}FO4Zy81N0%mX=UnCV-e^J!Z*;a8=DvIsF|tLtK(n3EB934yXt!LeLfg zBkRPjUQbNNy6fffQ;7~E0zsnA1wusnRW}yeh(1P&`#P^08M?qsTn{Z`q#SD`LPAW0 z8Mot)h?vK1>)SiN*wrER_whwa)L1u4w1d<{kr4p(AfnD%eyw2O7zspGE67gU0+KYZ zd^fR{IhM1=hD$bKef`>XUC7MGddn=;n8>pn-R>tp*$7iN;j+{AkKe*sa`&j(sQ$at z{ZgGsFbsqgDN#axo|^*DhtR;r9gy_z2sIG>2qk4H2FVg$fdP_Um$BCx<=+y^ILDr8 z66aV7_o{49lY71N?kTCV3g5}CT!MC^JZ_t=K|fPZ*bH=%W&+C53R4lASmq$?HfP7T zm7lcrxFQzkUK-Rt9$`OU1Fyix7X z%J?~7X4KkzRgOH%K1MAkjxnF-vH$k;h>m^tVCmzv4=u-?` zm!thv(P!Md4qv-h4zi2MJ#FsVJY7%G7XS6j~Ob{&aHUIfQrieQmsT=;hPCbs6Y8 zArKuvc2z$>qKBSkz3W;&XRIA;?d-bou*Lr11#=8zz&muA(7S#s9l%GgLps_o1KVCk z_apDVZK$IEtaRPY=WPvkm=j8kS#{8_LUFr8#S^de?4*zrS27@p5WmD{tc;DaW-ktW z8a>$o9h!Ymb-V8o)Nu5AE=yG({2fYHfau=LG3?HsV`ckT^2P%_zA7+h#wRujUqJ!? zH*18UiS0m0r7BDPTNp1>clqhU+q;d%1cXhyQ*r?Fg4iIRUW4r&Y?vJ@Z6@T!`qqy! ziIJn{$J5=$KS-u6v-NOr2w~`YuxXenMn>zi!YeBX(Wd6 z1jU%R`EK307!GOmj*4%7syz}-R$NOEA$~vl3?96t%mnBswtDZ}#4>NhQ%lakWeEq> zyMmv$seUsKFofr{*pNZ=lpvsfoh+wu2PW7Sz-Y_zKGH=>z#e0(yW5aYv~m0@NYhS# zcw1B`=%No!4pi!wqd>FF07nmmy}%$f9B(zS4O-y>$yh*QKzWf1svf=r>M-U=x9t@M z96=b2WXvAGYuXRE!HAD1A)RSXB%!lRCCXhM6*PadM)=p(att%aaC`3%635fw&wq5* z{)ZsYzn!&zfin<}1o+-p=6q<^dr$!w- zzL&n-4a`2eiz$GeaY)p&aGmpth~ChT_`Js4^bg#UjZRFMBZz@PNk$CZmbxeaGu9)`|+fLoH;oWNKM70EcNkZ6djoHjNYSc)j58V@LZA`CzeoH#1@fS zXt%5!D$w#l-fyfeia3?Hjwxgg9tF5xZQr?2c-SW7@kZtp=$hK$p^A6Ku1Ap7{*6!! 
z^4`Mz-l}m`x4=g`5=K&?7lBN$@nG<&F7IcMh*v<^9&c7}clAEo@9ebeVm#Egz!;-9 z)*_30x2OM^Ik-K3?bx+VjKZh0C!Ud7gA`+g4YAWnJP}mT0pT3Ry&6-O{|@}Aj{mzV zon8tlG6hW1n5i0(sH2&;qsM$5gxvMM+k!gQt5aQMy6;smgTI0AUwR2ImRt z{9Zv`M3Sb2Owb1`L^OHuKtGe@JR~z@j$o`}nf|7%oh(gu+w|Z(l^7zt;+#9<)6q!< z_xSx5FnqBn_h1>1phVUp5q{wm%6_->BypN z(K@xb;TUItj>5yy&YV5`Q0E#!&G1?Y{G8XCFl#sx`Rw|+O~tXYXiW>L23idzcNGsG z{Qbo!#|ZWekf)%osH8q7zBK`HwM}uuJkz_&3PU4(xCgBBklEXDnMwXUhuV2P^jX$Ll-T6bAY*1=U<`vO2|S^dAu89P=k&+W6ZdWhANqS4sa6R8NZY zz8n*KA~gr*q%Kx|Uu&twxYKhgsELRCL@)5Ovg<@+HN&z54ODIuJ5QIV4x5%WY5V`b@A_c=z{`uZGk!6pE=-cyx7aSE~UheC3A!!HZ1 z!%&_z%cLs^TuPOz<%n2h4M51mSZd@q{-64>q-*2;paTs4j}Y(&cV4g`OH1l}!MhzIyAbR`DL*m&Dt5I+j=CDO;;?l&+ts#?$3{ z`@6rWvip$-QaSH>G|`1MX;O{uqa z)|Ex(c6jWFMeUI^($E>J{dG9gZX5Qb)8|N0YC#h9-=Zo9?pK(nrJ@U}{Epzuye-Um zBsnqtk?L?;2l&9#yi6FY#T-u4G8wPH4~LP=50l0vlP+z$0e$=_jR)itLLQQ|j&P>t zpBFUmd17Tg%Fq`6fV4O>jdv^!GvLAxBB6waUg21?K2*GdOX`y6<%Cpu+Je(u_pR8Y z1&MzZad-b(j~30tx4jYgKf*_##4+P^$nq*t}Vfx(9MH4+eBTS?EQSb^h9=F#J|*x5t;viwRM}QTTga<6caU#OB6c+ zb#_D>8@U4RML8RIq~Ujlcg2d%eOMEKx^!9gX@sOshKc-L8X+b#2hr!58+-El5WkXp zx6ur!xPaF>*~q)W{PCA;C!hAMBRuii!Q;~gyk|e`ir3m$18||e(T&5Ehr5@9Ja;bB zIj2K~@kK&D9KpNtWpH+b@ffK}&0Qk6*2v_nYLA(sS-Hb4fka|kfna-%Z9TYs<~|;e z0VXDMSZVG=C!MkK-So#(Y}!9@K0FSOHnmGx5QW9^v$1|FNWS8hj$H@biO&zvKGN3s zHtTBWF?vTvMdMRn@H?}CAAQl7^s(1*99<#Ykrw4!{UW&^i{VZG(V8G_K6!iSX$I`$ z>4o?!6u?Gd3r~OvXvyKj`!fK%3lOa9RB%EFRUorDoVZl3t+VYREOQdMY_9JJ0~f$f zrep+N-7vNQ*iSUnDXcu=?KF#xxA;i}t7>JlHz&mu1jv*n+`@{T-CXEN7nbmkp|mta%< zIkJRIws_x9SZZ?d2rN=Fs#7lA?o%8|`+-V~?863?zBlx#J^UTH`YPV*s~2CjVz$1p zEI~mg=YszslRXF%?3`fRLdH&fF=%QQ*GS|a29S*ZhlfA zJ!0TtP*!v?|nkde@b1ftB9|^GfK;WE-5UABmZL+Fd@wzXky=TWQVg923B+Ab~el zGoGmuH7w<3kfjvow70g@B4?&@B_KK%+q#?uG5JP9-?_lZC};Ha+>7obuT-<;-P=f$?T~F?`N0lruu_u>|rZa!t zFYI?NFyC;13~&NGULc9kWRPb8Ss^Q#7!5Ywa1{*nDL>3)xae3^Ec;5T76`o9X@&EQ zjg{*Fvw;8SSE;1uh{ZhFPL}mWPULCl_cRe>7QtqIo!XEYbD~%YhEw?5ANfd$A`F!a zSL3j4Cmh#n!a{tYOQQCwNTAVluXvUc?OqnpWZucg?8wuT2G!*p;2Ql`UcxsdpaY^@ 
zeFqu-C=odEQfSRyt!RkO?pvI>ar2N@@FLto{vE>2qN~1RT(Er}!9aKHyE8=Qy0nFA z#xZc4Y=sSBEwO$8R-J>4KQi_RgK@6}673K?k>Yi%n;t_J$c*}wqNlISA!&Gb(=x$u ze7pg=gXTUFt!<781J&?WhC>x*`7`SsWx$}>xeYJSzcTLVxLnV7&sckrt9HxoIo6?h z2%R}wyxPJJ+-~`WK8u>QdBH#U^dbhLK>0dsc`V3_xcPpc%?{U=1&9-PS}zc@aJ7Xh zK$n6<;l^!SfoxeV`W$mh!^EYoTRJn|L>e^0p9YZ`} z#cI8#AO^+?384j!`FlL9GwU!C%$)$vzj`L3%=4&k8s$tE{5b^qm@%VO!kPX=A_%xp zbXIVCBB1brA**=HI=q{p__?$r*^1{ZW*g^Bi;DHTKw7&=mdg`AR_TB%_K6rdlFf3S zsBr1__sCiqBIXl(WbSbO(ct#um}w%dmpW239@ChX49*nwN}{N#8zq8?1$fY4Q%)Xn zg-#l8Do1F}*a3fWT23v3j*~}(oLALhZ|xa9Z>*3s#u@5&=Acxof9{Nk&198`^=8h< z8=5|NWW!p4P1Ab3u=JH8p)u*^3Ze0l3LZlV_KCT7Kb_E} zhWv_Gm>(Ifc0e?6RC`MlheTRIi)X^o)pMxxFY#9Gra5o% zcw;IruNoZc;K7^t>`Kb5!8MP6e44Sz9Xy8i7AqtQJz7C8nUSkrAFEnPP}BA0)!6!9YPax8Sxmgdbkrh4CAE}B1nTi!jjg4{ItCYAu}@(SzX zOLg{XAAD1Ch#_}OEFImVyT4z;Ql=8OK4yS}zJPp4)lK`(nk>v-s#ZJ8BUi-qfIL5A zz}pQr#PxM+XulU-jlGHO?E}3}b>pI-lMF9e=&fwNQ8Br#;!Eo}JacrldLo~vP zLf&-W8U}P|f@gvm&t1W~gpziWH>^dBUBwuGCZ&Cf1>qG$&r4jxFYT4zuKm5wG*w1Z z^v*~R4WA)P{bYT$7pp%ymywdPKoA-Iga4macmEGk`oHy+f28z(r1XEJ^naxE|6Nl0 zzs|0Ir1bxm($#gxm015Wwcr0$sQd3mN+WxH7jtV<2m1f_;$OEcQ`zdjnA&eX^Pj$Z z7pT+}1e8)KAf=-6i;yeu-@_m*y>VA$W#!*5Yr`aS^%9yry>=tKE-yT$HPH#`uh62G^iHekL#6*wV5g!b!(d9lsGc1nF;-d5-<45t?y5n2- zEzzUADm3}(U=Xrusbf5o28V(|{!xA;bPWVwZbpsP!q+F8k_>h(dGkGwuATpaG*k}8 z8!1b&de%ix)X&Wz9(Hvze2E2)?PM;OiOu_e5w*TDa!ekuiC>7seS(iTz~#Hpvp@FgP{EPs}?coz~F%l08oY{5DQV%kCFL=gl9}{Ce-xDo&3ZB@gOGhN(ML|;a_Y{2BTdGTk zr?<6{Hb)#(S)N49n;p{;cFKFEb`Bg=`7@=SnRtpTPw-BpUG4s-sHl+zB!9-y=_zw` zb;;{^rVh@2npKOeo*jUw{;0SF-Fy6I|!OQk4QVzjh3uQL2slFy;|HW0ujY zC>AODxXI^;BJE^*V&iHb6^ummQ{79+`m4z8KNM0_xG1P=i-0cUo|S3Ke+9-|u>5## zCYXme#p)Rmig!%%;cmB-Be^|n&VFWyH) zw9~Xu&U@lIyeA@m`oLS(~oH58*WFCg`79vYkRieTh~%+M(a? 
zDoPX0^P7t!-Xf#Nd1S>-B-79FE%3+nHB#ql@It-GvZ8%P#gEfxw3SWQxp99Sy{zaw zrjCS_*B1T5G~NBy+0s;a`Y3M%H+hP*^P{>2ao2XS{EboF)_Z<-Af-` zQ88$mOc|)Byu+f)6X07`nb}(s%%5zQ--eF%LL#J>dZkjt1wGjdx|tp=Ag4_Z*3?qR z(qLts?pF?vRnb%xHPq_+tsN^PgoeZNGsKr)_`MOzVF!JR0Yj#k22-SJ#&M*nqVRHN z1M9DClPkuUAL7b_XXtW=%DRbaYK*nIsHvA=FEO+gm#kFPT9#4Ewg#H$8sdn=!dYml zoEzew-W41wyICCR(pBHUpAM9Z>*JrQtNl#kk5zhN;{dZpJj;`1JRMO{g+aHUUKR?A z;ajb1PrJ13@pQSnoAKe|;La@{c~?)w5+e{V-z>xe zVyeLUJ-I=a6a8)9_rUPc@pjQNHBJqpAQ)AIDhwHBSVHxA)XF1sIVM1}^Fw)1 z{TY%+f{gmwJDjSr|6VC`#7)z@hB7OlDvrWr=c0)zXUT2g_g7`v&T*CO{!@T4XZ_Rw zUIXfBb5nl~`YVgI#{F5gXWf~g_9-IwS@x;9TlJE`Ufr)3B9FWDsuT#e?DQB*F7G{S zY&^X@7k z?0JUUR(70kL%){o)CLo1nIa#Ssd(Mu`;YYPFIle zzs&!p(rWv<=Nam?v5JdM$yqA9>pAhoy0Z~GDDHnJ8|(b$M_SpU#Xfc-sU+6-mxUDH>+nTF$XzZV2qisFl3bu{}p@+^oxvh=|v3ar~cO$oF z-(o;|%B7jn&e?7R%7f1Y%P^b^-b1aYo(5kOM^9)S+sW~)xNHej9<)>5;o?WZdm+9j zHZWLH&(&^LMS2cce$cs}1>~UWNOpUyXdnjfme~zN0k=Zd&S;Bm1_Y9zM{q-iG522L8RcdY%%$M&>H6EPPt zq1Be+3F85Hm>@Y;6~Jsc;lelY&maUSJmE*6XBHqa43Ics@JWF$_AaXxDOMj8t-VkT zL~aH4YwVzgG|TP?0MjYmw@{25fB;4XI;`G9)Eov220RWO0f54flBpGm%g#GIh(w_JXs3~ANA@TbORQA;}Yxh#M! 
zxgzSOgx#y!)8cu+tmSh@v1R+lAF<^=KOnAD%H!kLDq!3v+#*NAUe0mKqN>`ZU7SLM zaZp3RW{qCplqRBaoKv3huy{4!=xT9+t(CvW8W6c2hz13R==-9Lk0h`o5U5rPWX!88 zEOKxruPL>SZ#0KsFOOc$#U4KRd<`(yz6YE@B62NgtgyCA6!k)R016WV<7;P)s63x#s z%y)9wX7m*qZrsHtdDDg70pojdD2}W9)Y=VY+(WbUp~O{9r@)cLv9*J0w2f{Q;^jJQ zXMq!Ll}AUTwQDb{@X3vQ1fibH6G0z1-Xjd(;g2e3hk{{X-T|l}1UV5!I^lH3pQoo< z2myu?c?TtY4anc1%jS#_{Rg|wK`ybry{Vh!J2r_J*)=-*{oV-gwh8E0sv&7*?pf3Q zAwqyqUZuzSJo4UBe_$8>d}W6czy;BxLVCeDgru#o>j#rH)9+pl`Zq9l_+Yg+W6bC7 zln^e;pJXuK6v6@j?rloX*~#?SfvmpbV96b5f?L^Ug0Hc)vaOZY0`>;sLz$Xw{^*BI z+c_s$+)basy$)6e#1??MmbL<5T$vU&mn`+ophEzvuFPNZ-gwo^qr6>4U3}OJEYi}@ z&}4nMc`7>))P*Z(x`FEZTsC3t1(WR{@HNH{1@$SfZBmHRk`P9Z)(0Sced%WsT&LZN z!c1?qC4DFbSlD&oh;PDb$KK+x1N6lC(i5VfGaQSoTPc6L0@|@qwjn;SqMLiuOgK#h zM$;PRzvjv|!h8`iOj>sTP`?Km40qP~`-^k?1t}D|BP3E=Gt{XUVt_|P2NK9b2w_B= zLpRb?9h@>i1vAj>tBYiqXYXTCbmzc<5-232s5h#OVfm(E$XeO4xK|4e&Lu-;M$6zZ zj$|t-78?n@=%mkRDZU8?%b5%71CfGu+TF5`*mRy41c46G8d|Z3pd(+sb^L8#LsQLD zh6%F}OG2sle3e?Pjos}CoDrvlVfp#c8%(`(b{3oIR5mcF3rsVe86(SeSu<8eyI%Ls zX&Tt5cdi$zRyV>{P6$(BVb1>z{k0zm6)Wn(ENZa_^ki6FgZ2fQveam)+?VVz@0Km0vKdJFleJ99hAFI=Qk} zF&9ZY-i0%R>Wv-NvC=RaNQFe%Xf3WCUf+T>tX8(zr!S`#rx*p$@RV&SY{b| zJdzlcekXpIT%7nsXu82kP%wrH{p$mu?$9=QvCSZ^rZEcG`fwCLavQ;Rq#xiq6EOc& z!wMH#!QJ*tyb||*kVpr9%wQ#iv{wyDTnB+Yu!KU3b|t4jFMgQoQ6mN5_;go6;u6=KoP>l<|MB-BLs zR$z1~4^tq^nf7{{CI5B0k}^hgBzTK0urd=&$xy5yiV_5gO<=eRbc$Gax~C zs+B(`ZzfL66djkUd?nOfbML8EyO1B1k9G9JQsh2Q4l8gh-7kr=MmHU zVyj-JIN(7vhr5>m9*27$!XIE{#Xe}c%Jqf6J~g?^2`T-<*nF@8r`ri0=?vSscJ`t` z@`(K>aAdY1JU^Te;#Zk)wO~bxYv4r4I`RT~RuB0;|YYIaZ2X?I7t$AU@J4C=ef> z^En<0A?f}gq`H<-QO_QGH;HfBsg#Fk;Qb>Qy!RTL;eT|PFVEPa?wwsosRNRpqn;Dv zL0HmhIy86K>XIdf+8s@9CX=@HSV_?}7VTlDa%#K7^eZ8bstPJ)YR#=UXlcNL2V&lZ zZ^YyvF)roMo$B74O_8|_t9+Z{@BGJ;qqC6rTI$Y#+c7R4gC3-ExMVl8jMMEN_5UBi zMGJ%1{wf|%$b8icIK%`CENO|7=GFKw)(`tD*-vfq!iwKaQBYxVqXt@Rh6+Czs;^Zi zFtjuk6X%Rc?poyKO;5Hlhff}jfAM+Xs&t1UwydPnnKQdzzbaK!MeWI#YNWraOt;2> zfMa-o$|0D@4b1nu=j#m**)zd#bU!y)1Pnj(6E#=C|20xOp%lRGL^5+%EvENJiu`{w 
zb!Che%R!!L6CU9#F)O`A;ygu1pQF+9$i%xH45@Q{+WspA-F^KfKTT}l+w{tdQP)$x zdD2e^&3RFc_fq1}WzZwlm7#IQ9}A}ouaJsSan6Q5=J4eX|)WTNpZ zDfK`Rt_A8bnwU{WcG)LMG%QN2Vb&yvsTm~HtT)g$C)q_LzsOJ3cXOV8c31ECtyA~U zQ&s{73oRJ=O`P_O7sBFhl69(A8Jj@p$aj@~egECB^M8tU|APhiZ?Wzl3-BKc@E;5C z9}Dn*mj(E*v+ExV@V|W->p6@R+`kRG?VA5r3-I5^ede0=ff!ow4S1>xf~#)TI(ptw zE}A_8frLHd13(~6OVICpLCP&S|IQB_#dul;s)8d4K}0>>*0X$dt?;KinxC>6{!W@4 zG2N3VcZI5A%(uXXIZxJ#YxD~Gg-f3w+SqBa3)!}I`keA5&-5=wT~q$euMgcE-Rq07 z7N=72***(iKi!hG1Cs|H%$Svik+*s+pYrSz>eUbVr3riBo@L$P7pFJab{)Pb%3=FX z@9LAIuXj7XoxrPX%N5->zwP8BgQTq{wdOa~x3`4^?J2wp_l-w+wTY|cO854GoRKM9 zmW(UF+PjH?0o)-m+lTfVa|@T1r55fwD{HnFt10jNIt81Lb>a2y9l6%BZMo0ox4AG- zV~x89^5Tx?>$Irclj#orjg)pzZ>`DjcCBgg342ww1T^JDx8cV;5n-<)YZmfjjR&YA@~7eg!`4HTuwZhZ=XnxTdxaT9HVpqp}X zDtnX(KCI%%oN0Z(ZFX5d?!5VjMY1FB?EzQXs`1!zOqT4mczI|{9=e)E zKg-g;nyue0b}|-vtT$%(S5rc;bh_+;TA1C_Oc+MK&Af$7ubiSxHip-_o31Q;X0Nf* zd$z2C?Uk%^euj{(mm|p|M=a;9ufPnR{l=QF%vbtcroOK9f-Rmu?7P=30p(|M)?Ey~ zT3vNa3*6^By`ExTjMy!}T{fe)F9yu;=k{(d#qP`jz1ZQ+_!UhhZuL1BWS^O>Mol^(g8E zkw_9r_v*GYyI#W{7A=x{r?d1D1>JC6W~DwWY&Z3yIuDN=5n+EV@F#)b=Uflj)BoCUqaIn zTkc9_H{HxsBdjB* zA=~}|8`+;qxPiuX4R_Srr5!oVC3bxEE+H~unGKdaA%+7Pp#cw=X=hdw00v2iGlZjB zN(F9?v^a|EMYuC-ECzgbd5y7>CR@C(nJWY#9v1sQxIX$8qMVLFFir9kL4mDP7?^~eK_|iNF(N|Zgfv8?)JAdns6{G9xc!@KAl5>3*(A&Pe%|b$#$>_ zYSaQ45+(xDusC-<&JWJ3FhDLIF0#%Hhag?BH5w2Qa)~09C9D;rg@wQ<1d&KV4B<${ zuS`7M08jzKGS+GnR_(BJ7KXWIt2dO7ssE8yK+?cGFyOV%MFO7>p$LR?)=dOQ?`Zu@ zHlHgnw(K1vlM22V1PD>pP*lE_$R92A7GNLzSCKO?p?lgc2)#>&o0JsJkiZxMs`$`6 zWL=nhS_~mdnDH-)D1!an)C%JKgfRf4LdqNz83Zyx;5gw;K!iNhdK{F@KrXDx5wr_I zzO`Q>BxN~=Vx3Y*zaA5K;!h&O5b1JuE}F-evz!H!hz&uH=$ zSq&(Gq#$lEiDX>C(qO=;7W(E+3kn3|Q55`XZdmQc< zI)|yk$kTu-Yz%HKGCx#G)dSgw5Y0QzIMgrQIxoSED;$g$=2f2D@O=WGJ|U<)SQnN07{K3u2JxX*+5k$KByGQGz--{PR$bn{{=-KI4GPZ#*Z0SIDDUF z0Eq^z`F-U?Nbrb}WbN0>ouAXL4T*jgO6-efK2LZO$hoK2hC##0%lUHxx>{KfmRn{)gkn@mt1q9w*A}!-yB9|q1R5ryl<((oD;Ugb$Hvbcg^D$ubqv3!DpR@VC<;xU~gsc^VZ= zGEJEJF7-q>%ak9`j@jI}D?5h21(V1F}gSlnSJh6jTd8(BtI?mK>LVEHq*qGQdUyN<4yJPi0AFaGl;1tPa~on 
zt2ZQJjFSN=qDWMHJB?Wv00fG!)u;AG+;mMLaK)6|`m9QOOqQM{w1qQ!&%READ znmuRufEyx#!<;%xY!OOtM6I(>5SYAQe<}~|ErhVMe3Dh`5HiZmaMxr(N}N^(mWzTv zDUX{Yin2wKPwsbS$^IqeBf?=q84G1LuD?v$6JN!VP2j9<1SZ2CAS!2o^q|>h>GcX` z2JQg$IlF!EOLWqfZ&r9hySCSh26)oCW2Je*D`-fgI_E*tf61R_0jS$(A%a~#b5yG#J)0!vER3@qACbqfUN znzl~tFd;K1qG%#RrqRe$=2OJ=9nU1eCs&_oj~e7Ur*g3bD@NsS>ZZYHss-= zXT+f+m&%mf(k56uD(Y%$5;{baB&&DE9_6Z^-?wl)Rv@)KMwO#pAy>a=5%FY^ymDk* z4W&Bi+mNrikg1*;m6sqNzMP&ROr1AzV2y-K^h6y4L^bNjO|5pQZWr>Ov5Kg&wf+!O zn2@^K*zbAn*q>h8sy5E}AL_hEO!-QRq7hYF!%vanMQ+pZ72hyhj|6aI4qUfQWOU8gt6g4~624*Hpn&N+kuI?x z`R0?$I8f$jC^+Nh0Uq#D;X1O{EjWN2D}wgkQw_i=0#M4#CjBHxVF#$=Y1N;~mTWL> zs7Ur^!uA`)AD(|zG1J#X-}cD|_o+U)u725khQ8a>#Kr>{BXBcq9PHHl1vfuWb?D&k z6=^DPPTsk{mU}|%Ul74Cp~xz*kV`A@i7S;1`v0(ZPeFP|+rF^Vwrv|}+qP}nop#!` zomSd5(@xt=+cwk2d6TTQ&e`>?UAt;ueXA;0|GTd0i+9$1<{0BQv>O8Ns%;^x36rds ztb1>EjF^ZXc-}sz&M}G^c*g)avnqNX?%-_k1q1gat1JCZ{QvF=blwwt>wv~C0|!@PO$QGd(J*P(xTiv zLsh`jA=d;|V9x?-AY<;%0^Z{TGyI`|oGS)m_@n#ACx|Lm6uN#5L}hvF_^G?MmrVjj z#}<}lo{e6_sTlu=LChXTp0IRoz+cgrYHJdKUbzQaAqrs5-e^tT7 zun;@z(Yo2^;AMu?`Mz?2i&?dV5Ow8mj02(TwQ=r-HwhIG;&H;W+};{3AT;=K=0Vv% zR*K_Ex}Pi`!@X$>Kgw&Mg?pJj;A)5)6YrL`nfXaIn|5J209m_AXly2(h0tv2nludS zGgjE+yh33_ZAJF6TZvOBb!x{?c-3=-J>PoeZ6JA)d zXSzwtfK~`a(_+(=Ebxa!iZszQE5{!tiiHcu+UI8NEtt~+X+;ZP=0>^eXF)GU2%(>Kz?v zW*_^jvw{pdoZ<}I446rIgb{AAq6`X+7=^73ja6nyAQw{{76rXTLlC{e7@igqBD9r~ z$ce~0M$YAJm6B{kju1wO%qug6Z8+>J>;X4yo(7VC1=K1-2~(-vbi|K+yIz*%M)c%; zo+OW0A**o7b+m)Bz@M~XRSClU4D-N53W(Z1hZDVMh2SIKejK6SzPtvt;!MIyWCdor z0vEG)rU(@rKlFq)`~p)3yS{vSxeyH&xTLRcJGDY@UaL#%?me?y5@7Xl*!5kpG#^9n z_9|>Ho^oJB+|*SgAO>4YKsD~LqK<{Q_*H&PY6-(ldYFWXh(y9UY1mT2r9`R{%0D3u zSH0DMtQ`@*kOFM$>Iw~L&uSC;5}h^3zvW1ab~bG@b&v^#)!vd}y^aI9P*7TOtvc)M zk(ULx<+GcAb=9?X=nCOYvD8*m?kap)Y@vXHWEzf00-3a#@EJX%VPIMrSI;q`m4g$SLDepW$u5nQe3 zArod@?4*oFeTVZq+=gDRmeOmc+Wj)dhU)nC2*0-S0%TN*{ElSa8P!k4A6n-D2{Kwg zu7lo$rvwewf4!KJhZUkQOe>eyiErhqty^M?^4?|Xk|)vdg60brk7w1=;`|A61d{r= zs0i#M4U;Nn*R_-5X8Ah=>kHif;+6i-J?rl){ohymzpwOvU+Mp|Ug>|IU4LKc|Mg1G zk&8OY3m~KyCBP-7G%8zN=O*N 
z=>Z3HnwY=;lzjT&9mXe>Q$X+`;j0N%{F%wZ`VhYbG9kQwp!1`{#PleEJ+YaNVs@}3 z4^vi%EjG(tB4N}4$<1qj?L2$5OdJOxZTQhpdtv(H&O@xP`w_w`*w!`=86T?v=|sre zkPc)#eCGI={#2MrG-)B@lNnTOST{)yJBVDBAdivkLen={(LP$pI0(cr3Cz@;NBW(_ zCED8~!|em4=-_Oy;wVT5LhT?+HKm&@dbbOOD4`q*O(qmL{_uIiZ?-x}Y)rQGR5)1S2W|cXKc1b}h7x(;EjGz`23+UXV_&v_P?81-_sD9yD`0#? zxgB|kUzp_SWZ3(Oi-}hd2Rb7%pqlxjXPUpD)e#_^I4WULXZrg5&0>(dN~PEbqZxg(XXW6X$=h>aG8qm z3-Fa<9ml;LcS_te@H^yy*M6KW5*8n1irbqrudPyNW)f;D^$b*c3zW&)gsmh^7(j^a z>*p^T2_@#6R0bliZJAr+`4k0xHHmtX0c@7~2yZ5t)l-%9Bot!A(?6i$^qLYYO-Mm8 zrvcmBvHyVABz;xxYDD7E!HA~a_YkGbpM>tzat+7y((91l$r;gaWb>uH^$^EbFX#qr zPb>{fnL2mXS2JH`M*zAAHL)@IB$-uuX&_qRYqo$KR;w-}i)K$hM?7*KJ7)@RDYig# zDLPlCqW%{%p=$`M5;3V|Et7j_(zAPJm0Ubf$%|Ulf*JP&9a4;TS_fHba{NE%tO3zpAOKrn)P}U&?X@m z3ltcAF~QksoU$%Ij3~R6YC#w=OF>P;xZ9t}|$Z-RVrRCZ|2c>-4<=#K-`wDOplSE7#ib7gA8^QbQ%6R)RuF5H#t z{>FH&vApHHC$1GeTivyuRV#PPfv&~;=`VF6D_LFy?8g${@;MyLJu|rF%!g7?s>?Ye z;WpO<2hY+%K5m`CmcTbFVt+Uua2C*JgE&v3yeERJ4@mICW)0E=nw%t&aiN__J7X3o zJgTlnKu2H6rFCd!v?#OrB0nd$HfxC6?r^#VqqCxahm$q#vjYmjakue~>02xr>X?;6%}@0~;L0a~#Yc7C#*~pSQ!DzJmvboNX&S?jh~T zwPuNR9jq^>lRcBQOykr|FE?s=uiPxF9n^T4G@kjJ%f@I`xy(5utO-`nTYBh? 
zFS}gWo-H@F2)U&VZVi6PMm@YeaOzK)btFf39c%=jFdTK_?#Q>Bw7WkYzWR8a+d9pH zs^E<0h~8Nk!V#GQK_qu^LO5X$&g3#=`Wn{-k?s}Odfa|b=#0Rm&Txruf_~_Wz!ZG} zX5x9w;&*b*a`{uxjNs4)?#mi4MuWCp{=8C8h3To~<1(@=SG1fPO(U`pa4|c0$2psg z*4BXCce`)W58N8hogvX+`#yS@Gak4h6>{m5WHpNHTFwb?bg+G2AuDguNN+>AI^vGi zYQ%HwhGn6;baAdqvUe6Mqj|lH^Sv%hJqs|4+d+~$7v-pKppbQ(J)mXW!wjOO=8wZg z3|}%WAdD0Pq2r48Y7C@_?bAdsA@mw(B;XQR1SNi?+O8CJ zAR>O3c@RC+?-aWVVKXkH6fj*jxNUNShAj)7H3qe!N4C9WD+=tgy~Hr{RrV0{Z6@uv z>)9CBv?|vI4a!b9d#iL3m2V%}!EYtj^iIY@Ju(2}qPFm7B3#leGBrVdCP>1l`NDP3iQUATHIsPGQyA=*}K(V?rP>hnHUSp_Iq zG_grA(G}xQvHDmkVx%zev00u8p$4Hgwobl|Kyo-9kK1MDh|M;OAY39se0Qt$Gta^9 zk0c%~{Jvu3xl%d8EIYU803PG{i<_lnv7RLu-8zO+7`JQ+T&?g{XYLVh=8qXu{~s42?!A#QKhX zCv_4AD$;=ESGAQEQX7k=v7%EG()1Jf3=R9qPZaWzqy>37*Pw%nUi|Rj#^jXp0weM$ z{;H`j5pLV=fGbhFriNXJtQ-k?I8LLj^DS<}t0=S&rv2eF@iq%VhW}cpso--Stc6Qv zN0D+i4V!f{L~Dzoiv5rsr&IIzp4&V!c~C7C6g{cE)yI$e9eAlI3+Y7WRE!A-#K=)) zq1KO?;yp&UUto(Y-JM6^vY|a7#j?Q?!w$BCcDIuphO%s>=n$)~tOsVw^CYnXX+`3| zmk{U+UIHz3924NiU%Ul5Sm6>t55WgPtFb!;R#Lo3K~f@_TV3XdOypfh_HyDT)(u!6 zA=;Pi40)9s%OqauL(G6x6RigGVM=rovP&suTl^3Wpi#wb1v~i2I8EbK#?dDdVXu=! 
zw@8g-SOK=q`DJU{ayJd6E*asqg)1$>;sx!#YJjcJ)YfFGC;DL$Ll{;xRjK#>c#}ZZ$k6ZCM5SEW=tt2OQyABg8QF(#U zM@!@vuaj^9yx#c5>qLc-qNJq8@sli8nyZ08g_&$26|7dxT2_zYA<|@_yh+Y@TuoO$ zY-Qe0Z2A5i!@;`E)p4+)e51mh@{>FN^#WBlF;$lqXR^yDpNCBN$|AB zrI^lA4b0ypVW^mT45V5<3JN}c#!PmFcwHVG-`Ztz_od4NqHzN7`sXIu7Q4^L4BEjj zUe_8R1}^}3eT_Jxd|w9OHS5My`WHr=*9Yu}owcj-1@JKdud#mf8k%H-VPZIzpNHGs z%&nlN94qYv1(TbT8-)+Ij&dzU&yXCJR#1;5tNODba=QvwlB6|hmX@dn z`y6ztm3HZ&cTpxN&`YNit((o|_c>0f0%7j#j<3#`7o#(bsSr5XGMZJZmniHwryM{k zj$Tc-!9uJb>Umh)y-lk^tH%ZqA0-Pa^n)>7U3Ol@J+I_{uCI-+uXqrzdzp`d zpal?1JIW8n7Kfpj+8e5ud|~9KxST#30!!Zxtl7#PxLh} z*T9Jbk1PjlJqP%FUXNv(pO*-1@`{sU*4QARrHv8P z+&ngzYHV->Ql$7P5XoHFO$&8%1klT^k5ArSvO^Z*s&CM=f>)DXlWvF@8=9+C5$9{ZV={&T{T*8;HOm3;Bv=Hx&(q1s|?XouC&Gjvf-3mv0kjY zsU@jj3)yLG?oiE|?D(i^?w`s9U9-9!sf|?nV8_0^zAPR0yfS_w1|}qRZpWfqwPO9~ zvvLj3Ew1)${=PDhWT7^uZ6a^>qAmO6nRRyFqvptJ0T?VxFka*W##6^=T;K6kiT{US zms?o%igEeHL)@~@h6`Y?_fY01A7%EzXiq)hL6;W|t_W-&S}*)yf>(Y>ejm1>l0eFn zg*#r8Rp9|u_4d#xuc4Lzi~G3RzH6@`i_ah9kk)bIrr+D+YS%8GxCzBpu3QD8&Ih}> zKjcRS^_)$rHE0RPR;^UOuL4o%(?o|iX3e*Sw`ODkYp%!s>?hfqRid~ck74+)ro z8mt@xS$tU9N!RFkIWlaU7_$SplxShJ25DA$l?cMZW9ZxP?S!%vSlLe=Vl)7pKe&i& zT*_}?Gc`05z2B47n|M(@dm@j=N+t9l$Qc=~4n&9S!$e%e0mM85Y-k!3_L0)4#59VC z{9WeJ168M8L=hln>ZR%#a|X{92r%+_;a#Y0)`LacvaQMma!}9soEfP%8U3w)iP1DqIW?Ls^_}K1Rx`2 zHnFO&GK?hpdD2xyu#I$_mB?*c?ntPz`sgwu6sfQys^8HZaHJsvKOBkCDr;{N#F`d) zu8kP9Zz@t+e^z6v&*3(`d7pA{csvL1YD2E#wG)1lTWC1bYIEuIgGu{XCzOkMTyfRs zu=1uw$b7sJVPC4wYWu;4>G6JVdYM9f_snpb-F<;FeiE9ttl#L|U)1W*FM>u&nlgx3 zd5O}PT3Y6&^k?2w72wCOTAF_hJU`3Cro5eg(IDdD%fASTgzt1@COw>ReMdi{Pf<|; zLGy9JFv%Q(GP!eT&{P%OD0>eMYfmS%u1OL7fuyb_6k@eQ47+QxdYSL(Zw zMromMxA(^#MdAsChZw|~wgp-h(`8awMdXSq8?1EYAW6XLEocwaHMTZNxx{yo*~70R z^)EE6W+r{~I&^xk&vr^ItRnHS=FH z|7V%`_u2K=%>UQSmPfN(05jYDdoy$XF*9HTQwvt-h0KQko{8DYkpeN*F-N?s$?}Bz z4@sWdL|0FU__#i79#By4p^5eXQjIP$SxU(ouS%4_K!!pjyd>a%d9P08*Xti^$>oWi zF|pyITVb(4E-*}@9oT=a7q&LQZY7kI(8!po^^kSG85|~fzhuG$lkV-e6a%(0NFXHu zGj^sDVY6z!F_1J)Xr758Kg=xVAqQ^$JPps;+PBMS{jF)ux?FP13HUk)$_SZk-o11D 
z;S*mha9a+zs0>P;dJh!|FzQgMVv2e_m|X0)X0tzJ#;7EWUh%B6QcfE?YC6%C6)1>< zJchNMY$3F6xr9f+{Eb--uh|^?TLADIQWaqnf^O#aot|DK&L)lQ?AmTM)fi5 zBmJ!Tp29I_BWq13_hn?b$OC4i9H|YKJUaZIvmYU~x(F zWoGH_-3t)&Feu4nz;}R{({{Im0Ae-+5hH zW2yC3&nh(kY-1~$J)zrfzG<~PR~fgJ(HsXJsO$h#qm9B&q9zO>2KM!L`iz9+#fP;Z z$l6GT7Pta*phQJ6e`X}8MCjr6C2)aMsXPgZ88Lwv1H>HgOU&@U#QdERej($qnero9 zjoH8eyT$rk5>g_HT%ZK@@bn1x(4qg9SR#MKh>;O)2PY1uubx;W*)K6O{}yv*Iv|)| zmWs+cbAj7heLET^uYfnWgvqJH$oe`VPLeuU=!P>MwG@W|N15&uU$H;OL~sE>%+X2C z)}?DlaV%TE)Z25Qy4toEP@-1Wa%BYf>T21cH%7o>31abhX+p*B(()@sVmHe799+ zzB8v0-{ThD6dtdw_Br&;Fb8^Dlvl!d4tp4LzxGM zp|78`J%iI3YG7?G1E>27GTwaOpYBI+c7rgaLYXY13zW;rf3RBxH!iDDd*&RG3x4RT z#vP@@dEs!0K|Y#_*|$Fg9(exxX^hPH`wrGLiFcj?yPu}+>ZRFa z#gcRLH?>ZtOrc!_64O`ezM4uL0>0@;->Um_`@8jX=BssWZTt_W66=y$+j^Kufdwou zk4zmrMKCfyQFNVc4<7{_KF7OimV&du+43RTOSDrj*5Ms@)#B|7P%o6N^xb&%+YB69 z-X7hl3?_BI;DWfZ#(Oe+RiE*d0_#mybP#CEc@5NSp3UL<;d3$7-0o3r4R;?JGw-H5 zc^{(ZR?~bx3b%dE_cSyuvZ5>MuRS_D=h#N#T7OxTQQ!ArAJ28I$`TZLd&2+J0n40d z)2{AAyD5x;eE6rF048e=sETQUbk>J<0@JemcyQZ$!erN)PJRuI8i1K202OIl9RXB8 zk2hn{M;-fH;Ell0S24Hx4US%-nAdy$fep)y%>$9VLt-{?yGfsp|5wc5T~eABx2qGI z6aMGR0$&(PZ#AZ;oD7)Ha?U-C532mVVNW-hr@mPm3_96poL0UX4--+gta>T+#eO<7 zK*fE5%V!Wi+jLRrgsNOin%w7)tGah=MGKtn#X*tco;oHfyX=RJ9R zeh*JOiU0ItU2w1w{^`7v`_!!sZ^K@rb3LCfXCaIku;mBYoAyuJaP`W)Bg-Tl*SC;#aRL{~}Rff%#m_JqrW=2JTO3$1oGDF3TIl#dT%U#T&}N%0%r z_A1eYs(h#L7w*1R;4rtadLHOg#Nw+CLOFM5Lb=brnthy@?kk&v$K6v{4PWRL++G!d zszVVEpmA4`9qs&?biYBREYwtkr;rJ~w<#3ecu>~!CHl}A#uce*$DP_MQl^*^n&mPW zGt4Z7$uw)pc?1xvqY1d#ic~+v7`xq4c~qKjesV$3n}?;{k>OBXiuqqp?8&rk+@CP(_=Q>EZs!8Ie-@HNW z6`qcMOI}6Jtfi_WG`+@7N7Nl-H7>~*woIn}(Hn_q`P61W%++b2Pz7G)*IYsbYgCL= zD`V{6#g~$P2ppnbWB9d- z6j~~Kg0C;Oy(3cf*A94M9!;Udr#GLqes~e>Yi7qOQZ{`oC*RYDK-AO`ep2N;Wn;~* z@5lR@wXyE+mH94Sp>o8(-JW_yiBwEkDh{cUWt~#Cf5srIXj2hmgQckuB#EvVuutn0q6D5-XWc~x)MS_TsVEx;(BrMk(sk$Lu~`02&ieSjV)dW9 z)?ZftW%XZH|7G=m7OQ`sU4L2qFIIC$Hgi)0SS=1b|GYkX0%{ZjnhmHR%Gh1i2e(lV;kBSAE1hYc`y8TA?T zDy)&aXvtQZ*i+Jv$4@|{w*@=rjEOIOQaDErxhu?KY@U{B1QG(`+B9t+jdkPLEDHxU 
ze`+#;C;cWRN5odNLJKua)9#zzJf;jZWbu#gPYp1=Vr`|(q9i0&LabAZ?rvq+^#x%d z*zS_(#@yJG;%}FJjv4)cK5xds3h6(6-b)_w*lBhVq7y>s*EE3H=@;CF4pU$s3;Ux` z;xP-p8Sl|nGk<4&@U#xst~IKAw8+LUh7;A8sxWn9F&ehte5$|nCNZ#)ytQu1?rc3x zpZG93WMLU4p^`ZE2v3x>x1erl9)2909CkImnd|f)8aoB91uV!DC&~b#+o@qna@WLI@*V`CJ>E2 zmTI)MONvT+CWPnsO>7vkJyP%-r=?BGOi(9n^h?C%SqYuhUE)$>P!wNFE&Q={ymZ`e zTT`mc_W2|jX4f#Jg+vYGhelb>{S7CXbS-A%a|=KXM57nM~%$v zMZB}gQHR=sboBk@D5t}_N$Vs_T*y{brMf1EU&GVOEt^U+C}C72>NKw8AJZ`@gb3&G z2Y(GumoP2~9XKD~?0kWF6Mz9k3~5WA>iGse`UE`H2NIU@q6erf$thj#%B;#z=6^{^ z`T9wZO8P-iEZoUNU6M#-fM7ymA4)($Pri3%KUep$84Onkda{~y&Ri6}wb=i-op32R zwZ^3XN)VPxWsD%CXD>pBLi59(rKd_ENKKmkkxo=vtgk1?>DAlb2|Kn7p6N2xcrFp$ z87O#XVOD(3+wJLPI$Svseu@vp4J2$*0&l!ErZ=HK3kG{HdXzL84Hsih zn_A(%YZllM1*Cl1`FncWPlMjB?iZ}<*HoqX)A0p%i}M=q`gn_><-rtUcr%J|Vekmi>Y?{#LbQ1NGw9f54jZH>^W{ z!}{Tfywq6Az$eDPHG^`EU`Be2KNxsRiiWAXb)!AM${JumeXD$pCTH}KBMqP@uG~1vs8-7gbr2ajf?T9u9a-MQ zfP!hT8$7qiThzn}nIa&mhCsK{G4kM%Sam%%x$ zsco=AL{I8+@7!uTcz%m2Y3gIsZgLrgJRD?AoRbAp8_`{~*tYp*@0o4fl6W4!l?F1& z!BfXQpjgd(1nb#k?WOm<5767a{Y8u+kDGS;Qdm$wCGD0ScXoG>g9!oe`%zoW^aY+w zw56{Rd>hK(8Q%8Uu|j&JuFw=Oi`L`1 z`za>`I`y~uG1z#5WtWJNL@)+sm+62mCJtikO;|N4x`c7KCsi z6J<<%On#`zl0xsO@74u9MMEIHh{Af2bqmvv`EZ{S4Khu6OAe+YnFLH-a!SR|AU2+8`hbn3ZN?A&lZZiNMw@t}BE8$#1U5O>kDI-_TQ8t&Fx6Spv zSOgkCLOaJrp(f^y^4us1DD5PxwB{6>EV2snD2cWdI;vrUC}w^Qeq-B^!(qn5hHQE0 z_SCid3z&#&$=3G752QFGl{r~ifTNXEdFt+*9087gXL%#|AIsDK^>XF>$-~CHq&i8vVb*(B8)0$@$OW^;gpWlJpW3X013t(o+A8q#6I&)x86lm)6g{ z$b|-K5+%}B-(a}wP1zBw8756LDH463Mz$@_6M+-mV~TuTpg01#C3>RtPQXt{pyhTo zGIw=N#Ci^;i{D1_aHl7j;Y7$$q&I``W{RpfGdFFNl(DMy?Z>Ct=Z}g{Ed@XEh113GZoQE;G1pM zVX@l4wizv#ig;;5jj z!0fFM)MM$N`@3Dt!W5OF`oLm2dak$SKG#!fz^>=6CxzT&$QUO@%1+g>q>0e(6jSbC zg!!*(woL*1!0R&$**|lUgPITlTOyuygN^W=EYk^F>QD0Rg&FD!OMV#dW}Fzo$@$(v z_9M)bbSKs5=$agj?g$KppD4TI*SPeS;zH1)%w7fYW7hpoP!-t{yvQoSJ!DU+>Q86b zK_5c=wui`u28$`blOmT{_WFF)6O%-hBNp0?o)SJqDQcO#pnZfL-eJZrYf3Q(=49RA z;ma9UsaIxW_cxIQW3xL06S75)-Y9klcg?MSXO)BPMdOV6!Ns72C)o4@$wY}#YG7yq 
zwIbogsGByAxrU~&GZ&u#7e+Ql08eOauqX{DRoD<#hbDx7vn{UEiiAx`{aMK|7=a!W zp4gDki{?V0gCfNGXGc8lKFHbbnyUEo4ROXthS{$&nSd+*Ybxsq2UM^QG?k`yiRD!e=V9tztIU5D@=7&BKAJCMr82AQx^9udb z970gZix#xBA&c_hR%$hhu?H6;<*S1pRo>?z2BnzO?0q4VfOS+x{SN`#p15di5X;HF znSp`QJvTH^a1z=6t2+uV>x8Zs%0lY61YtzrXfU3N?#;vtNx`{1F0o~wKG5!a@t}~H z1rkgEd41;baI5k!{VwvY50<4MPuI`i-a`E_lpilT=1UWzD<|;46F||JSxy9B7(eQy zWEhUHriIki30{~fB)o5Qy>(oJ3toyHD^OR&J6Mnzx{#P_l-ibljZL>j1vS_$Q1&qJ zy;nYB1bD zP{Qj(39T+A;83R3WNZY#5x!q!$-`dodO1!rGWM9ZO*tS zsOna>!^g1AL*mmq6}TxDiyI{0&~4Qo^t0SIoRuD2$UQBW%*z+b966}YC_a8tmk6Y^ z9OZ5umn8h#%dE>BA^%OOQLOIPoY$R{gND!)MmyQh-?JKd^108o#8#SgH_qDh%vqjH z!vWxm#c?S(lZ5B_&FG&)PTMZ56yf0R%_mFQRbasD6CdV#OuvMDa7pA8&}M>IrBXO| zMl=H>1G1h2e0T`N)2=lN1Xf5S3b87x5ol>`0vo#b1_YE%cYsOVtmI=Ed7QX9^0(rx z`Onzfe~nDTJM4D7Prt=j0`p@a3m(+|sMX$ID!W09k>JDwS_#(H!~vv`DOfMbnV$%F z*%E%Cuf5bzT&U?b1Ei37<=5L9;D9#~08+?sex;D1dzrs$oFY=TXm}+IdbPPOmMD7X z`w8uBstWW%7%(v4SjjaOxq#M~Qs^#Nrp3%x@?{k_Oa_;z$jl^bt1Mj-ufKQrWY~W z&igFnUw+g&^I*rk4u8CMZ=EJYg>=TVK*uixXNgQP2a z8V0f8+y1tUM7qS=P#++S442IOu&hux_yU#SO$SWE{bbtxr&rd84HbA>vIRaIL{JAd zGjhMU2=^G=v7_O9a+TIRMTcJh(!iz##^fp4^n5>p+i{r@%gZGG6`ckLeaR3CFf$D# zD)_ZtW+v>*9X|t&iT-NkZp=^;EYYYUX))1k9&}?7D76ujPEkPerI0~V_Fj;9F*|5d zc?eCnwhAIRaS@X11Y`n(ZiY&Y=|YwrvC4luN!M_CJ%5j9%TpbMN!NOOmT26|5wEtBeU%uQ?6 zlY_t}2=W=XO_JA{x;JCSc+S zca*wU^=-}pFgbn9-qitePyg<%QQEt))1F#Kxs|HGgg|8AQKdPQ(F9efAVV8#LzQ6+ znK+gYYITyQ(>xtg$9A=lDTZ?_9I1cx5pgLmB$8V^)GsRcs~xr0WgU;I=#}+HjL?qb zj|zc+4Zr`x=8%8K>_0cJzs&y2?7z(Z%k2L!X8%6C{xbVt%%-kxIAsSg+x@>`Hq-xy z*&sHNLQSOForb5um3@H*iINP%GLdgsQ~w^?dZf}ZSscS-c4mW*TWJOEBKX;(gU&Jf z@9rolJJ9vCUx%kYVK-TBlI8+h!9(_+BTMI*qXGNZMS{rPE!MZkUL0=+`P<>adypA@ zvcG=75g;7~@0rqZ%7)Jvr`6cTG=>JJl6q{|=@UJ&3R2jr4*%S!gmfli@7 z_EAbjJldo_{^|t(ED*>u0Ht1TkVXiPJCvfBfi?$R{C$-hb%)Y`CEs}+(qdJmFG3cG zanL&D8#Yj)P+#qA^U44kvG6Qg=GVkrlRPd5qYi%ctf+m-FyWj!(3t$|&$zGyLB=T` zsB*}0)hZv+F8ZzjtHpR44O*#eoY3RLHb!hS^A`kG^p!S4ui$-0NJ8rKusF`pV3uP| z{nl*nGOGC07b|A*OA~!&5mIBT2KxH|(AES%8w1}l 
zU@sNtYWS-Gr)M-cGxmBmW1u2Ee9zE~0d{Q^L4H&Y5xCPqnh?wb)#ov&4*ZNX#JFI{ z0>N7l49aI4>;>3DVdm!~k`P%CGN4Z&gBe4$MwcVXb9I|oK5b@36|w^8A~MA=<| z^8jd1H^H5x537n^GZSP$m1HPzqw5BYklpI|Fh>KjzS96%-_3xm?>yKj|&{Nkm|b(S~%SjaG*=e0&$Xgl$a#?`TxxN-c%{*tl*$n7#BCNrsuCCl;jHl zrAYI-6YmqutBVDKE74$06(M8&60|+m54B2GmJY-`=fB3uO4P9@xL)6uQ2zPTfCC~F z2)BmzFvx85@zQY?_H2EM^@|48s?+?9;1 zky|2C709H7coBz!(+_HhOMwT@xR*Ook?MZvqv-&5KXkl7-MqscZ=jo&LMLz;6G;T0 ziE`Sn+)Ymqv!0D@K+VWF z!3Y9piD|w`h&=%Iv?X6nUcGEfbm{~?_7e$HijjMdkHfi2H(Sea1WqHQZl-6`N+s@> zymh?-3EjzDDACxYIr;)>tcK81th7rGz0C-~fDFKolFk<|21}ijAkE!bd?PUo9;yMm zBOq|_Cnd;rA7I#Vb}50Gi#TLDetb=a=hVUnfp0h2(HBisI_@Zek zcCO{r0m&PBze^ALD*)clzu;Z=P1fBfVbTmVM?B{ufolMAY?9HSiFyP9{6XwNK2P9s zd@9nIIkPGc9{=CkH!+iG?|R+-SrdVV#HYQecA{Hi9P zfV~4ZWVNLVkT;Nf>qTp}B`7fdBoJEf0tXxrH3f{{fC*5GYVK|^;T&=V@Q)I}r)51> zVXslDwfrB`)EXKbfC_eGpnuiO0GLGf90~8hgX#)Sd%)z|_dmnbo4^up&jdRcY2=p| zGQ^iWY@T+5e5Kq7TVYAFNHzK`$&JJIlu*$zE6283hgGscD^%$CG{@TTi!#r}w)Qr* znTx^UYhL6{&9XL{F>t@+T_O*Os&mRayd&P`%0cOHSsn-sT?i;)!x8xP4B3^iso$l;nlA8Ku~H|c$-VIWmJU;S#Ese~K0=HO}lNsvfQ zM==^W%c9A?!Lm|q^6s^15APFvWz*Vy<*?ysBmB2>|CNiT_LK{C8?A$Bwo2V_zE9vD z$lfDY@!)y<7&>(}3$>MZhUP{;5hx&1cAnl$PFs}iZQhBi^Ksz(#F9?BmAvAhYvy?6 zfm7ZvfK&A0q|1iJJZTML$?i4k&hzgzoMOqW72IOUScf)@C3n1f5;n(yqpNtM?9{5!bI5K(%v#xPNgXAh z@+G8s$<`iti?CcqWJTZL-cLJ-g!~$1GDlDXbjn1Wu~OcD)>#7Pt6dY8jn|hn4(BbM? zRbIS29J;Cz<9#?AbaIv)ie8e?Yc$R6Zgf;`><( z^iiA7m1;S)ZAyQ|D4FR$0HfRA}u|d)1DJ^?OuT_$}t2=ab?nVe? 
zqW|ZGod1s1e{NcTS^byQe_8#P)&F6v{(X4;W%a*Uy=QIbMF(KD*MGxm=0B{yuv=+E zd*?H_$b|ws4-o{g`Ym)BBODo{M|LGw@|&SPv{sQVT~djXO2)0x#vQ4bA8#MN=|3{b zzPZe-u-cv>G>JrrlTLR1Dqla{xb#-gP+zAN3$IwWG>OiJ$?tsPYtYw^%F4FgODIt8 zb(adggBw4~5$}3mo3%is3yV9z3NHa8w&V*7xf2?(nX=iR4j?DBzlwMB6j`V1}?PSfc?=GYtp=)zS>az@;2wg`tz}4TmQtiYp8)?iv5mzyS7lbFgb7&?*{p?wBE?JM ztQpH8QE1n>Z27M}K#3{^glN|)MnCp^$hgnkh5Ah6In(Sy02{Q>M{zoAylzn&zJm?= zXjC75BSvQ-SdL%lTno7KX74^@^OEh_r9nSwglKIyoTD9b_HJC>zwYpaHtt;v5=pfJ z)UQ7hbeyS!EU*hl&lnZZo6us-V*qx?^i3PxCc%<__H-tv?I|=0NH(A5KyT$CYvmyU zb~;#+g4vb+JdWwh-#}#?7mPWRyIc#S^SP$-KEOnn+gBY?&^EFFDWld{NrgSHucP>f z1UPN#fz{CyxiZ~Sx&ps4msjx8F2SY6O8;kaPqo@N`Vd!WRb?xRZRrx7k;$LQwz_}f zzEg7va^k+!X_OYh_#_BUX!76+9!Al95IQ6`Xv1WxM3{kN)FI ze+I0`Cw)S=CD={lTl?K+YWmC3g}SaW>2z8dk|T zxlmXll6!C~+5zEYjH_igQWq__niL=DJ%12jTzwV{MmzWsBz5?d^Mi8sQ`xSHpL8v3Kv3rI{H&&|L8k>C)L1OT8 zWpX>cdY$8xOLpc?@5pt=ye^-SOa=Tjz)&TX9=@GGyb zQ`|f+gLF^u=iVTOOzu`W=Q9a1Rjd$dN9-#RZ{*7g> zT)xxnsSWW22>)I#7%}em9t<4+B;;c3TeRLgk$!nlpUOyCDy!if083#TRnT(y|-N4Xe-jsUOjE~^O%Ii zptT>PnOKQYCR)Q|@COoF@v2xkgkp*Jj}QTSJNC&{6~dz339;E?GyD)+3XWr3Zrf%8 zuFyfc4bJl?ai6XgXe~nxX74Po%uZLj!znCO%a^KL3Np3VMy=>sg#W|dSvBR=rrR2K zx8Uv$!QCB#OK^902(H21LXhARg1b9G6Ch}C_u$T+kgva9d#}}NSM^y}<>>tfPtEJS z$GApw`y^6x?j)XjOeUlHyn4OeV(2P+*hfUSPY_xA*EKF|?Dtyrd@z2Y9jK4@Msbr6 zCO2biueq`u+--S8 zqMmHy{CXar()e%?$jW;J_x5PHi(#4e>$w23DI;>hvJ8H{-85h-ljTc}WJQ~tOk=@K za}hIpOYntHBz_+*=Z>*kLwDR6SnX{6F?j|y8Kf~yHq3=VxZ^V5CgUj__$${Q5CvZcqTsml z)tzwTdfK#BjU;u~?C1{9Bz-!E{I#PCIR@SGXJ~%taE;mhpNIg112pU>Q=k)uih_`)#XPtoCdM_lnW1_z0HY`a#h#DPdDAR! 
z<$OAC*7A$&0C?d=|JNhaS` zE!OpnQ+k=TfL%75$sETtxjYX6=4Cw~e{E?J*sIc=kcKp(oslkx^mHBe%1}@Tg^^Ji zO&MHlt(*vHBm~8>V{W7yQa7PIKA%lunSNE4)M0pgl!l5(=-p68h3(z%@hmD7%>-)3 zfivBHsv0ef?|I}SsccwYSedM(odGte1zOS!F7J+(@*1N=vP_2L*t$*Trh31M2c%8f zsh8mfN~@3-Pu6Jnvm1f~SL*8cc#jG^3!ApKiD5^{jWG^IgyDS)AuSgpL`ABNLWCt8y+hp=LjtHSA*D zMfCA}_NBDLnTB_=(Wt1`YSWN61lF_SPGo@fEQgC7-46W5F$8Xz_wlVL@dm28i-Y!r zA(5-_-?Pal7o6=*#0KS=tJ&}+`OvAz(YJ#|kMLEj0=Rl+q9}-6y@I;4MDsF7{A>#Pj2N>1 zspu(DT;_@6J zBZvz2mg8SKU-jw`@htasOFT$Jw=+ZI*iin4^AM!B4NC9^_4*A77pg=8s|qH$@+ zm>C-eReIcg08(juLVe0i+oja{1E$qDQegg-CY-V*w=`pdtf#XW4rXm7ddR+v7O6xg z+F`$4qE3X|Y&Hs^6qlk7k$6a-aLc8gb7KRBufD9s;32>*3z3R6&IJRh^C)ZyK7HisLfiQSV#liFe`^O#|{a1`Y7<_jq*z_x?pmR-^ULOdJ z-b}onVlf^_IforEpQQ}UXB8)>%1NT!I0Am}_n2<3D{WM_$Kw|Zz>d^{52f$3-#Z-e zd%uC}1^nLlugpla$%zzpUgonZ5c!W`kQl!+zRYJa5U;UoRlwETgx%WPgmK4xt=r@H zb#Ew;&g_Ni)B0vF7%eRuow$NW0{QM-;`U96P$R1o67!X_!$Cva*kD!6y`w>$neW0L z!3BQVdh_R;Y3sU{u&|Cr!^6uwWIWf*pX&IHocyB%ML*5mf6r&-{M1{p{UAZzb%>CL zlsfDJmxDJBYBPsltYWlSv@#4Q1qi-ROd)v%qnW020m1hh;$*YNb^%RIo1khS44zX` z?MR};dm$Ce!eeoD3!MkP>^*B=%Xt^A!PQ>%^f++MD{pk4Nq3*g6kV~uP*gT1%0qhS zLT-_2XKH&g94wExe9`=SKdTSo8FDm$vP6l zqOC`hJ;9ghoo23uDdE1K#+!^c0)*Dwm_L`)yEiXAbX!*MtY7zQA2sIN%hW2{^hn@s z2WXaZ^F zc@21Y^K#Wn7p&K3tD$ zwpP6>BycRK$kvzb z%zLKm2W>XYNYah(YH2J7`>mp-+6+Zpbnze1a#}dnR0#&E__}9 zt*U3tTR=}PYXrSfCrqfkt7Tl(4yI(HF;Vvod*WAF7ivVlPpAQOsgf1euk+zI>)D{2btXXRI7>GjGcPG-K} z*~k!XvT6id`V`0<{?V?NwGQ?X*w6a(yq_gYC~q1vA>(!>Tjb*lj|iQ5%yd#l2ePov zkq_c3zE}~Wp9R=HyPnBL(TDh|(_Cg>E9wDN3m}dn){f{X4RD;=8hr~(zEg9u}OZ&zoXzGUs_b16}Gf#xqnB& z12jtp{CYi^HkIfZ75QRODtQ1Qxa$I!w4yp-E~EJ^1dpQi#yH~Op>|00>#}rFqRCFxKBLEo>^-fY8~OzO?_3$KhqC`r_J0^< zzdT-lDEn{9Hkt5*5CjINx}T{rW&;uWY1as7}s?m!>ex zftcCnl3{WdaI9DVi+&Wa?;xN?EWJx5MRuV*SPc{mY;@2QZiL-1c%5MuKh<4d=tFpT9p(Cf}66i0YUIGJ5r>p z7)`Rh#K7#xg5eK3wHxMg_j11u_(uw_5n^vSM$AdglLwET49s!>Z<(eOEN$vHXHQey zRALBb2nW?mttwP1}W+w~yK<*_!Pe?;cQOscQe7|<^GNRu~ljdek3ZNnKCWN?8!8Qb*xZ?HYBQ?Ui)*o zE-Z(R1I;!@a8k|!1)YiomYHgJ(EO^LL%tn|m011C9KyhT<4T}lPLNZ6&gTu+BK~d< 
zzo7Y=kl*4mEd^C4FA_T><}Q3|-dtS`1!^Les3g3Wf&!HEMnPeEGzTGlBJ%k)p_WrS zX9s)kYafbS78>|CZ$7n;8sR@Bk|rn`ppC+X`__C}+q4p_zZT~3$@>9~>x0dg#&r*o z0;uOZJ}-AG|Jy~XYShp zK~~UQ`*%hy`-5{25x8A{{iZo$NgRKSFZ(b|K;s&u9K=qu#{B!i+(B_c>WC!q>AKk4 z4mdiS{Z8-k^JS)sg;lSP!*;Zewz^HwaD)VR3HCKWjFe_sqe#DV!IA}Dl6?|k9h4Od zKz28y>@2P3H{O0=!Rc#=jiy!DXcDtiijhF zi#P5Wvik_Ab`o3Ob53WPKmm}w0#%3#@4yQ#2c2pu0Gab)6m&L$J4YeFF*}IKniv)^ zmoZt--Y~Z(g#qR=LLu5i4_6-pLqoiL$O;aq3@f}>WKAk>&av~_6pA`H=iv_r9waK4 z?D4yPMc+R=!C6LEw&m8gS=pO`;J20=VK*vG5t&en107w^vN9_>PsPknt_fWn7;{;A zKK1W4_?79GZ$2#T*y3V$cb)_UJQfCIZO$50HC)Deg$Z@_&DXH{e6kLUp0~HdP%I{B zg_9HmK?rq|9WD7*8v(*`V1f}F^?rgJc+2waJZGZV6H&Oq3YzUdU%GVk)0cE_ic%d378Xt!@et8*fN?@!DL z@;M+&w6`7KlfFr{W(W*TE0a=bNrL)5#V{_C@$Hoe(6`np6&l|A<_pc}zTz}zdEB{) zq{C>qOwc%0><7&VY*gz6azZ=c_^3JTHtJ#5+L)7_m&HKD9JbgEUvL_A;HrhT&jg-N zVR*{tZ@w}nT%ST%aV6m-)`7BaLrK^>Q-ukSHhMwj zet<7USeoxWnTZAqn$y=Xo%qS9U8RfR;nP09XF+_HVRL1l%;vS(NBBkx_dRL3Q9gPP zn#yt}s}^fLDF5(=24^Vv04Tv|9RZ&}urQngqd6;N0PswLLT8uyXnU>FbQC z&$|f4EvN?8U|RreRX^9RQx~zbKQ`#$*m>(hhVcG+OQ)v~SNT3_)jsxzo=$&5ka$N= zO{j!`Y-KTOq$dd0P=(LP7nUE2FtjRt(J>`cX%i_- zg{!-OM-?Jfoe8YxT^dFa&Np$BTL*smh<8MLWlWLrjGKXJMFJ$Eh_eCI;TTWQ{)209 z-@nl6Hqb0*j<~Jw75WF5K@LmVi9LZntGp;0T6UJ?FxmM@3R{Sj66+-zu^P9 z@GKKWb>!sc&nUF>y?%CYNs>bM^nYC;ztpYAgJ0^_RzTgFRpa7>?zwJ#+wrF7->X~y z{_gq%*?%DW4`ly=?Ef&xetEq9K=$8|U9#q!%mP5R7TseL;4jQDjulv7i7mVtD=nHy61MYosA=*6H$EbAz6er6Y0pM;KFpTV;MHGO$mg-;1yf&R@$zhvh;z3^Hkb zHSg90Kz7;s_I=D>TC*3(era2Ofp~%JirclM^tu+HfiiJ!=N>bB~owoY?7_}y&|w@q6$jSBmkIC}7>ZpZPG9C2ae zF~F*;%%08Q(+*t)aVl|&fk!{&z&;jys=EXL`|X}0WqJIG>3;!iEgOu`m7@Z|CUY;MqA^F> z``KUj?0WvA?TinOO;z89qBg$Y2{_jX$EXNVD~zKt#tq{MX)rEUzL>!mWMyBaM+0W? 
zD!>e`I6LEgmA%s{IBn^@@Ui|uF7t9J1^R?VYKfa+l@Tz5Kj**`mNHi$<8CqS8##77 z!&avLp)`X>@rzCXn$>{PO#8Re%pFjgHKvWiw1*2WQn>~KN;7nSZFzsf+iUgbX0<~B zCPLhzeV5XVYiqozT=Qh-&PYqex9oFNkMzznXSjF@A$T&JrG4_W;YJSS+FJQ8HH5MHRx1T;mvmNffB7L*qXAK z7X#sStKN5cTbb|suMd_=-owSEeY$EP@$BjAk zmSgS;bO^-Z8K2tCeYS3R-43bsD@Xyx+&z2D76fx#x#+QPy&V1M_C{RB4ZOQOt^IjJ z$EU4L({f3T&gi}=zJ;KI!zol<<14Sb#+gF7)OJkID*c2qp?jN-hLFS0{JCa8et|ls zNCtqfP5EN}?8|IwHc>8MmD?17AGEuM)}pnKBa;+)IXHUNr6O zjR?{Ny>{T;Hccmo6MDypQgmS%UL%n0OGgr47^0nX@Wrt=)l{*N>X-3O*@#%-Tf{Ox z`P>D#@}PIkh0xnrk{{DJy}VM&yjIjkiCcw z(=;>0f{LY<8Ld8f9F-E5(IWNSKzRK!?xYK$ z>RUy<>8f(T^GpNYM zfr^>5whWGGbL-$Be$~)!YxV?k&mnK|K-K5mtcYSlM zQsYU*>iYjbw}ttCsdj&7!+(Ec{Q>GfK>Y`({{Z!W7*M}FUVni4Z=hxzg|&Ge+*14( zK+X9VP}?nVVYTZUED4iAq-i0T$X0I(P7%&J=|Cj134e5h@|KjX9Yv%l#YoOZ+Oo?w zAv7Q|A~MZ3L6(+IA>eiWzRO{I2(L}rM|OI0|Bi3_x^KJzmA!)H4Q%9Hp-PQB_y8!p zbnDIsdqyht0XF4;H54VR^>)uNm9yxtF0?MNPcrap9x}ZlD|v;nwUn*(gzKKX0bX zp^U-g0v_|3Q|0HN7FHP7IZmu0FYUObu)P%I7E9cz7QMj_?v77wT!bXP$rM;}oB`rv zFcfPF3=81*(_^m>1IPk}eja}(+9}M!P znMwFCi>NVQg$FHi7fyjWBI!GSwY#M(#4ri)XBz}~di_{_Jv1-hzVMi}x5_ey%inAz zZKW@oX=N$iT8uv)HAKIT5Jq0|zs28J+; zbw&z-9qS4q7y}sUn74sm??RR3-mAXctd%9Cxk!>qfgS7FA{}S_MV>vJ3~_V%DO(0W zUgp)V>de zfLQpD77z;$&{8%x(17z1=4VM$82DkG*{{i#k+KnynWdO#h^=FtwQSsq(doog-!5b? 
zKZMQFxWYyJUE=b`Qh*l`ww-E&B#Rb8)R1@&yRS|{ADVMeem?@d;&F$)V!tD`xC-3Y z{`rfqdm%18A&T8&LdrDF+OOFy#|F}*(Mg`!Kb__Ei9G7Cjcp)r6R>)N^r zLRG1^CrpYVmnh_Iz>YQC*?ZP5#?4V|6?z9ddZ(vkMaocovcuscO-gafol%r43d+GR z1LT~%6BW9PVDEIm#(-$JPz}ZgVOaAS?l0bKrTVQqAq19U%?YyA!wj9*gn_53A4`9L zGG~1Gr4y5e?8^mx{p)gLx(W0Ll8SKzw}sSbBM6@UYsPQL%S6#JJPb4kRuEAasIxrW zJg5TK#_`?^{5@jW%!sxYK}#1o#81n94^5X)(kE|$4f@aV=a!I$K6z%ErM9`BkhSbE zAXYAu^_$EC0jMTVc|0(Agk$P-)8F>xUIskj5-OV^*-djfR4l~F5(DuSKr~!Pi*WJ8 zYP@{DW%zjvhC8{;D*&n;o;~5ihR>dGE>8H} zgrAvf08~T1Ky~EGIq6}w@{1=t-Xr9bYgfu&P%X9j_8F?FKa5gmC0t!ee>8&F+Qu-@ zz$}L5e@FLFq)Ob#%8C?z5E}FCAbm)l1|Gln9BDxe0 z5<>?1i!qu2NKk<8q;n~oEvY{N?`|Xef~_lEHnMHD@l1xT`;1DT9_Sntv(<%CcYQYo zT@^J4tVAu|^m`Y#w!B$_@C&wsz3`(pAFW^T=Y<9x5S(73^>14w@{I3%Cr|r78<$zS z`&hdyI(%NQ=XGm244pZ7h^VvXpPy%^%MDRyiX3V|$lf?PzX~)|cV{Kk9!*)OYW5wj zw;8yQ94qF(%vd7YD{q=++t74IciL(%0OLMOp%PYjVhYU6Mljv09NrRs@@GY+cK+#2 zq__oYS`n}U{&i&a3DgvK8SX1!38$WXnxdYxEMcDJfY8K80MYXLIVO-+707C9&orVb zoSt@Q8V)YkW87?_*Bww_g^%MA+&gJIMx3}SAJ1A%bp1!yV z1kF0u61j|&ec~LN@JwQ2b6OIR+--$9I+!}3c6q_fD6~H#Oli^Y_p3(a#Zojel~&A@mX&Sn!o7Ekb(F&d-odDIzF6mNNMYkF)c;m&FmUr z0(tk+$4@v(a>Hjd2etEq9ko4aqeS5ga zD)_&&thxR^P5*Pc8faOQ(kT0(TS^v|N0}eaqI3VgOc%dgrcKO`{}})W_s^%%aw0dc zv8MVSA_mq=;^5#bkm-DWJFt+3JCkM}OaVS=Ia?Tz7$;7yg)nlJie6@on=T|BOf7rT z+1itSvwjlm=Rt7U^Gwp8P@YBLx2v51Nnd^#rPp~Dfv2oN03KjkhFfo_)O9wd}fA(>tcVAw25qj8AaWMrSlbNUs`acsSP78N#utN%_e}P zuSc!aL=BXKA>6#`&2`9lm~Crl@MvVPnn9_4+?CjE(7z$Yhm4}C1RB=sm4ctZ&LE^9 z=~15>*1r_+V^KT4s=yb1=q5D}MsqAmu|?7cKQtKfC+g?DXQ9G32Y>C->W$@b$J%+s z)*|09tD#xGe(~B-vPv(`xW%b%bg#_{4qa>)Aw(mAvo5Sm^7w8O7~C3El8M>~Ar3Th zb)@UyhE466DHb&T_7jqm3*8AliXM?F*nADcbPdcgj&~sGE%Df@wZhzJXKzvG-`}ZgJ1S6Dp z5V2lB`n%9fe9B#b8U49nov9P>mBOa9LT(-IWpAsPqIkfZx+h6$y4FxoNzgb+3|*w8 zIo>NA{44ua-{-unT+es?q(6UL+7cISe;C+4YqGOT9+fB7QVxjL_6|_}(2L?u^L&_2 z^NU80gp)_gt3!%%kxcvS8k~+6k-)OY4q1lL!Nn}Q2OOqLn2MP~CwFJP!fGIZ*&gYN z!ChE(+|aJNtsC%uS8lbx-WZ8}6I`C&b3g!` zJd$DyYA6Z^D`&}1D=#WhDxwf0sjhB6nO$}_6h1fjV?e&LOM{VBbzm%PB5Q5~cRYRx 
zu0O6CgNMLCF4t)A1~T<0TpCO3T>j#?+7*DL0ZZ94Nz0tQclrlO8`8r)B`Z@({3dBe zams^n6l8Kr>WYDU=6{eh+%ri_ogr)xNC|tMK0bLS zX}@QZUfnqSCrS4JB)!ZGkhGtd>v`!+LU)Bk=PXFT=fpe}RDVINkeTnVB`*2A=!0|MYxKmc5|LJbIj6K&Su^r@Wgl9Z@Kb>3%!9h8D!RL0sD zJSD+FXh@8KetW>jZ+K8Q-_TsYB(H4sTxjrG&oXw^nSpQ3+IEkP@oj=$d|aZ6;hDX} zgeH`G<2>-|?%Wrc+$wqx8rQJh(ry`ywmJqn3rucF_`4-S=4dyA?9c=psR)^cPy&-% zzz;tWbo!o2zVOTBR>kkht>>2Y^W>IDzT5NUme=&z?U$97JI{_)`%643htRyIhuB~4 zBWTx6io`sz(G?5z8K`bhblF&OuM}{O$w$it zJnaQ2z&WrTAFxy(z%X3ZWpsQ%6iEOEnQHy!)Z@(z5*{SdqwmY+ma81;|5LO6vf85e z4~Lb#KCro{Lap-nHaNT?*^E6bMW zZjk3`UDxb};*la~N2Hzg6TUoeJ;j>9eX(j`zxG}oWoks*+x~28+6ea0H!CidTY%hwR}4&mHJUwbd+nN0d_^uY5xA91#!W3|GK^gyi%vDVP8=dd>AaWgC6Ks z<9sbg%3`AUq10dn*ubO3jI_kGo8exE`h0YfaxL4$7$A4i26LGSMNu1YJV+6M`*Ptz zpnug_w)f4L>pEIh=DAxf7I*n}Fo+lXcKHx%K_$lbf&NK4oTGxlyGwLlug5yJOPkOs~=6ZKklDASnOic4g z#mj{xAsKSGq})={anolXp|?T~hFg*$&^~@c&F6_gV9)1u>G}~RVk#Fp{0?YW!^BtD zWWqUl$-lI#Tg4#_iQSq}ho>m7_l?ujvl>{j?YZ8Y0PSicV9fg6bGw>RP-i8Q4up4w zWAJe|qEQ#VgE4gl3~QL}RsV+3yvJ|2hP|=xYmi#e#}c;%v0-(;zRRm)V%*{F{A7Pp zu0))Tsi_*-#|IXvI)jSEiTz3u#!7Wl9#y4;c{DX8HPG!d270Lok(zpOMd_?ufO-2Q z+)U52#B6qZLY{6Y;q)@VWE+;@>rh0Gt623p4DCz1dJ2+dd@UGZ+Q3)~m*$D^w-NkD z>yh#xU<9XGyT3$QY`KRvD!}lIJ=6OtOo=~Z_w&zqeM>iW^7L=v_3tmPKY0BI zum9lnAH4n#!|RvF>knT4jn}1jTs?9CUaS0zLpArm!0Qq}`FsMc|Jz{(1^Yib%qX<~ zyTi<{H73nfTpah*!@!$;rrrYoL<^e<>ar#B{Ul%}PVkVxj@|8S&nx`Hulosk?qwE-&^kkB)Rk7cN5tH?$E#K-y_+;s-OsB6Yb{6nY8U(eoTxC;AT zPeum4%G#&3oDpUAkTW=$IuVOmO)@=d9>GAjG$ibwhk7V86n^r1y1n{JhtWm%S?oQl zusk2BteDj9!>7LnKMooP;lR5-tBkh<^w7zRvPys zhfv-G;1z7mvjPhVw@`{@A$UrM>FVC~EjEWtiKr&NXjW$$Iib3`p2zRA?w$EP%xj>p zvfo25&z*zNi=K{o#P}F0#jGtd=F2|*N@En-qiWt-m^jN?fyqOtQAf1Eo$nWGC;3)t zQ0BJt)bBP6UZ7Lvi#|?dGD(xSy;{_gA43!Hzzua-`spI)&6i|wHMn3pI&^~aV^jhTv3)B@uPOY>n-I6`KsH$^*h(eg3<{7oBFnw?cwyTs zp=7Vh-y*a6&OSh7R_h4*4G@{7vIGvF+%iqQ**6^jHh;jaw@y6(Y-{ZwCk2OvT(zGif%LQX$MRn_^-KlW+B%iSi~>9j!`61O0cv+F&}MAyXUO(CHHI+sZv zxY_Ddy}aoPPv5&-+nzZ@mXxI*+Y!2_3!@n`Rx9W6e)nPZ&|8GTcx?;g^{9RA9=>0G 
zSR3s}#=1k-ES|#hs4(k+;?-c{G3e%fa~)VOoaC_~;+AjVbVG{4ZFipfh5Vpb^vx=2 z08@6xx3<7q-3b2B7b}d2^_FL#Q1F$k!zM zA@~**>!XWbR8P#7PRU!jAL~xGUIhW;W7WP>%c3Lvy*jQxy~CTr(&%73kXf7$I>OR~ zYM0@ARgnl#R36?bh0j4!+U#*Ic5k&%fsbE#)tNaXj{FqR5E)UZ`SA{0z5Zv$wz`4o zm9XZVg?~j0W;iZ+de89}cy+IAXu4t1?HRP~;`${MRidrGpxIOx6HrlcWWLl>=4R^XB zG7eL+eD2e8Hf4fBBkQkw7A4;V792u-7HSPhjhEf6uJ6&Bg{)orM8k5U zt#w@<6y52xsBfkBe0Oc0SFKl0e?t1YVj6fahK^*@ykDq0-8X`LRjEmtE=Dr#d6>Rh z|4=1FvEG5X{K?->3AGNivLti$IQEA?ONKpiWP4YQM9e#N10@P9XJq>Cy3ne=`D|e# z)QW1Hp3l=;+1lE0hi^;Br^E%C3l;T~tJ3j->8;JrcV}G^;U{ULoOI8^@H=MyXe~XN z6ru`q!wmeKky}=RO-S8v&n+qw)j)#MYN?AS5HYNoIA5icW?9s2m#PB)vjtv<<5Q~e zWS)D;%quk1{!@JMGZ@f)=TYTXb@E)2+*JbA6ehx`lv=?orTa=>wD)r=bP`rodwR^?Dzj(DE#;5*B{pY!`gpX`wwgX zhq3m{|D8b_;_z?MxHy2Jb-@KIQb*Acfb)(a0GQj$Dc!jPLatGz=9bs-A5TkS5AO{wBGbc%F@u@X2H-{^MxHN2yn6+&wqkVNWLy z&~1;uI7sH%%{=8M1MKSLbbBnN^0;&n&VfUAUYRwwq-8LS{4b%7o~FmY57|cVGICL@ zg~&(mR)zGPvM&DDj`gjrv=|d?4i$=&Vy61~oWMHZjScgV>mlu;(WrS|wC4S*W1TX7 z09-n`OxTK?m*6bFtM`_QgR%@;&y?YIQ$BaB&70@hNx^?kEqsd4ammGXuxiaNm5_*J z>sAudf*bheApmwayF3N-l6C!932R|MR^M-*BX>W7^}`rg_JZo5O2L24B;s4rA3(-kob2ahOvP+S)N za(LJ&)kj!dL|~Drj&EFHq0-b_jIhTF8(y`Z%Ko-<6lfXch?{GK&9q(- zHhHc$Q{y!6^Q7g%TTz|D!MmsaQL>JbC>_0$dls3I1Gj7|x4%SYUAC=4;qs-wS`fyC zkrB>2C`*K&MP?GuBC`=ro^Bmh@j9Sny{SI*U{t9kVz#16>nZFbkf|`xj{_OnPqWnz z4X??UZxo@U?4RrK`SM|D3!buk+<0Gh>etkVp0HO4VO0}}K;1g$(LB_8lq>;nG;Dro zVYuS*zx1ue?wMZt)|VI~)m6Sg-@0$rp^KtNWknt6TTjXB9hu5N>KErJ^uKf~lVa4R0l8~`U_32m1e^j>-zYZvh+_$}3|Har9)^_Kz160aQH^d`vJCIk@wwTU6##FqjTQxO8# z9(7c8PN#^vMRnHOxLXNBn0kHORBx|q$5Z^G?&6Ol%WII-C;Q*IQmODhj!7}f|9U-x zw?PisxaPuOh}_f)TD;kXXni@7!QUcZUz<7yLU-}cFMghgNJzwWJC6Q z$Yt#d5KXADmK=&p$Y`g`6fY9}?;liJL6iko5f`BNiBKAuMOb9Nfk*=v?OzSlTIPV# z?6jkf;5-1O!nFmsXk*skbWL7Q`Y0D+2Rw2^9!!hv8xk$Ufzv|3RMZB7mfvx6UGf*b zu8yh_ghg;a@|Lh)tci-TZ3$R#Zaez+jfrmRkbnNrg4FKK5s(cp`B~K}pe5f*vqe4) za&^hKFj=SAxyYTv+bCBJLu}s+5-yA7F9>U%lpPLGsIvm{&{KZx4= z>ej7LPdAGcq7DH9+cXcO*l9kYC98x1Yx9G~iSb-^1OAfRup?Ln{>`ypZHIA2_jpL} 
zckxfIw#$6loJ8FiG&N)(t<3azLCcD0mhFcw?nt$qP=VR+Lq;RKLm}Tz-doP#ebfa9 z{)9T~6mp*t8mN?;nDf>iJdG0S?VFz@^(=P{%L5}@=Az9}!t=z;gaIKy7){Ag%d

zJm8XT*l%Q*#H7{mDIX`uv33XkFM`moxR;W6OA4%ABxvn@}i(uBIB)H zKy^nSs(#A1^org%L;X!^)@duv0@)Bdx?Z~vDBt%qQ+BBbJoAdIK^Iaz+HmZkI33BI ziCbtkNdk4|L>l9h`cRJJwe;EJ$rZ*YY^5815sO8WT(@Cii!`_geGL19nY0~`^pfz;2X2-B9W@Z9W63<$IxQnW`$xf!Ikho6kSDyCDsPMd; zm`|XMl2`{(2lU6MP-YZ4AWe>oet!X^$w#$mqd4hf#I(M>CB3#b5BG(K4byG!=zOKY z?gpgE8B|SO?$jfJG&ux)idI+MP+#rEh{Hk`0}AZDeP2DyioTkXEvOCY_{+h&0)!7Z zcu&kw$B*OETlE?d`iocZ!<^wo$x=gze_jJty8zFb?7ot4^N&(wQ^UA&(Pz(DVeji@ zOX-6y!C5#|!gZI#j2egzpYXG@q@+#r0%w9gPdA8yIgOiQda$50^Ia0ri&@Pi<%lN? zKqn5qt07)}hv7Sh#Ty~e!5`+GEvQgFeI(eu{M^i@!I%f~fs7&37M!zsCt zq0B?13b~|2R_{m}K3T8%@qR|aNKWZkVMSSHRg9M5rEdFz$1xdch^aGRv8CIeT3)`~3+~;=74X^^1PxO7F zXIR@?v3Y>$%oRgSwztnSl+*?!o(=g^ZW9J6{QM!`+h@~R%CqS#it0H|UTYi!=)^@9 z0G;>(5GO|y8Yhk;PEh|VPEN{as1(1V`d6HMLpEU(vu<$L2`FXCo#H!D}27%ZjP@ZjdsLm&FXez0|u}Stw#@vDZvirD~!1I27{pqTAK<2mWk=zCtFkECNBieJzxr3XVwt^SJh^h2Tr zw^wu^PNe<#?_&0b78?cMs{!g)=Km;W!vV$Y|HIx}MFrXKTex(0cc*l>bT=X;-Q6YK zEhQygQqtWa-6h@9-6?Ti^xJFiea=}o`)-ZlmEOv5yyO2cfAg8;q5rd(oh7%KnrCgk zLk#~~%w`XGEoRTi)BG-GgO2t#0mbY=V_Da}ZN;d67`)c#pJFz)C>!MOVm2kPe?2yn zXrUc|(utx(PzQ1S%HU|lshmgDaE=nv{FJ|o+1&ZB#cb{aTrCP}`dJ&Dwbx>{xFc`Y zp=pJsE%;yr+@HO!{E)`cGXh}$`q3(0>$R9&<^mM6fA3!>DyhEib^UiS`;E7-KTeyk zr@MhTO%qvF@6=UaKq9wegoyGhgWvna+^fJGKF!~uwFz)I_3TA$hDq_^mbQXJ#vpK| ze#pmRkm$P_arz|4G-w#!BvkarVi4x?U9A{T-Pza;?vd7VEEEYppU z@TOSDI>#EK;IF!~bO~D>)1wnyIUt}yVGuG2GQ&~Qt7&wSGYzQ_QbOzNmF+o7_Wlfa z1yi_xO`!R_BgI=E0StG&E4{9tnhYJ`iXc0C8fsdu=~kRJiMW%rH5*6*y4+0{O= z_TtZLa_nVVww=hsSdW-<%NN7|ekMuCT&)XiRyWpe3qyLQMzQy_$~zSzh%L<*U2FpP zyB=q&rpPDlD9G(K`=FJ8Q@l^+2p(&jnc-NZ-FU9w9W>wPyHz`)$6FHa?^4~Rs^IL1 ziy{=OxyJ7+P552|?@DNuCbcIjeqW^#Q;e+7inr2)Mi4iU{2gogw7XXS<-~S}Jq&DyR+CMnhMhUBN z5y!m5Mmy+-g21FnRF}zJ7u`LZ`?Awrnm|7)!Sc* zX>B#3^tUjo+T{Wa@r534^KbUIPV3>!e<@;TqU@}9HRU0@A@oYN)L=gtapL@?8FzGX49A&h z|AG5vxC=9`bLpUg@0640v-3}a=Uk-E_%rPg@rUl!SNMqhz~(i)Vn_#8=A$8s3bfj6 z%!MaRQ)ukB@{h=*wBDg=rD6d`DK|>B2(zj??hhu0#UAl=dWe7p;vwgQM*>^`+&nxN0^=~w;NjSQKhDIG+ z-LIjD6ro2E7{Ts?@b#u=o4!Xx4Vg#hBetg%bY6Z2=2;!N^=W9ddfirGhzke?Odr9lb4uz))pZ>qvl+pTyhouU4R 
zwT~zCiO%zFrHaQ((4}sl^qa991tO8o1MM1B7wdO@*#R1a3#_%BC{&uU7;pzUVhuBa zDxCKXX==!pBEx$3$S5;8p~HNy?(qCvyH|HO*>vKW6;m?c4nI}Q`#-tES^qq;{u1?H zqW(+Ne~J1(jHrKqy#5mPzleG!{vx+yfw%FU0wF_MpP3YmXP}*VM#D z;z2y1{2}6M@y6Y7$9J62Hee$oAPPms1B@ad>`C)YyGQyofYcDa}cZav^pmyMx((%(_n_wj}#ov8*asUGEXzqawBN+x1vonsuBU5uj z8DT6KsX%LJM&{>9c3bYD5Yy)zA%Zk08FF)|Xjt8qAg~j{CJFZJrp8#Hp#8;qaIEPu z{QQw&gz5z*#?>ZeTy8X?=+w}>Alp*qn7auqQFh_?0yYX54p>B9QZ`Xm4#7LJ_tPOy zVu9((Xo|334-|MdeKoXP);{YSOcCN!gB6f?&Q(}!|rg%$g1>{GOW!EK7bdLigg|86qjFj{Tc;JLQAM*w3^@M!NlOD2|HKz#W zNM%)yxfd)MD(}hw*TisyW$O7T8dJqMw+rE1>1V)&hltEH`0?T>L+7{&2uN*^MD2j^ zi9;!01!@O8t_j^Uat}&igg7|bTKNFSCJT+?bd@C`NcTO~_;%R+ULhNBLLQ?qj!@~I zf`^?F7*ZSMRC&p;iS~3Ast#R2fISl8rFBnCM8@oX!HZ7HZKB-FD9|g1!;EZWlh<`! zlnV&Yo~^bqP z>8F=G*0j8E=Pu#Y8-Z=hwWkbc|8?5wAEbu$+$_R(ugy93CyF@Tmf z_SUATN+(z8QMa~m}=UuxLWI|6rRU<c?K+=%^m26{oWQ~v0*|{F)zhVQ5ks?7jrR85Qq4;}l zAKK@ZOvLJ(qL(f_G8Apl`7s;2>I+K*96mgC9&-sIyKZJmW;0<o%9?-6IP&yAkys$E)2QgJ3#s>}!?0x_hnOeoTs%CncWJ8tbmh5|UfEu{O*eJ8S( zaK9}Gza2h>b*Jnaa=qwxPnZuGFd&vU9xH^|1wTh*n2$a{CfobH5ZG%H^K~RjNlqs* zDBWPAMVjDR?bGJtAUgMvPM0OUBZ{4{2_DwL2UGRmoG0`~00yvmUe#wDf&KP-_}`-X zTxC86kzCfrhW=5XA*m};knY1aGfety3WtG;(s)t=>a)Y&>NC=CKz-Kuc`zqAK^9P- zNiYKbaMA-WKh(?)Hb8wQdUIyAGY>3Z9~+lsB^rVYa?1xf5ZmLW+A;s|C4mbG#zgrX za(96l0%DEpA~msw+d(lo1FPy{&Q?$Ysd|AJwR)of8kl;1jbTWVEQjCv3HK)xJ|a|2 zNAXE^!-rYJ2J^F>p1SR1p##c=lZr%skBx0&MJb}eYJjax!+x{%4`_g`70E9beY^j# z_1jmr7L)vv9bz0>zL2U+6AiF6;_P?$IORWVP5YazmjJd7lKaEfp8w6(^k`brhd%pW z<1eD!{=>-FccU|Y#}4>g&(6xr<#l?=vi6{nCTcK79zK&E28}kpuj;eY-UfE7TuwlJ z#_{+7koDm`K-PT!P1Z4g$QtpLtkHnw>px^Y^A#ZLx}cH~$@iH5kTr2_=l>>aPsNLx zgrr{Mf5?u~t zj~l|?mUxWE<7v}oAj)cJnj{*kK*uTt>eHh;o)|EF{p<)#Uz1}}q@rO-I_s}a{&;k_ z`?c0~^Xoal-XRVp3y%cttGBN{W02ItwZ{RIxzv0vo78mMWIxMS(hvG&fRi*)3ZpXUo3&4J6^nE_)PTlc$JDU-( zpIwi}<|W_^!lS}S%=DH$?)Z1$v{OAv^SANsLd66d2gFf5bFUY3lNNJR6F8csslfN( zwwuzoD}6YNHlwZEdP>o~cBa#d%2Lh$VGPO?;x(=gq$|?K&?};wZVW)thry6!5Egh# zN^9b;Itq0s{a`T?u+Ob8w<0BwPqgU)=+DYB4K&^H6kR?n+4!Yr+F@mmnl?9Svd;-u 
zXM^>XwF#BT8;7+7jp;)}?PPTi7>az(GcF56TiGbIBJ?Q<%ALo#PWAfs!^goIplkcr zdNy`JV#*BsF%HSL?V^?su2Gs&5Eiw zl!j`eIZi4NBu|n=s~NkW_uba423KTv&RIWTDJuo3{XV?+JGtF2bIw-rw>L2+ zkUB9tEjs4pgRbYWj1l<8Vb!8$_oJpPdb74wtzz318Tq9NV(}zT5$}EMaS0(XeBH)Z zzhpKXu0E444wY>??xHN!IdL@4F{P3V}&;@1U5C6N9{Z={MBY&9NZ+P!oZ=-PGv31v;%QM<2eRcjdqfYEY z^%=wT8WYJoti><+sgPNCr)?&LzBUx1VAx1Gh&p|I6mjSy4?S=haTFBp&e ze)RKIkMp^aHTUskj#tauEo)|E zo^|d1j3(dW^|97%>Bi|_U+s6*15C2fAo?sT5z#)h4*WFEbXfMrBpLrvan(c$!!}Pgjuw3sJ3y$s8nOrP!Bpq z`NR4a?>$5d)Mt+7k(~SCfYh60;n54B2Ngq-_rsjjgL(nQ8e7UK<=;Q~=om;k~EaIYn%cA#TsB zv8dgMAH{+6nl@SaQIF{tkX|Q$uJ=*7KvYh0poub}vD9zRp@AYr2%#K9DU;Z`k04`D z>n9Pph)L0ddY++`;jhJg_Z9pN19VablL1-{*@Mi(%iDm z<{F7^r(p5nm_Sv4dg=_jxydI%`zA--X9}$Vfd8s71p$>S!E*^3fX*jO~$4`C9(3`Qg>hGESB3Ali@6|4TMuLV-QzZFn z7so1Dw;|}K;ZT#T4;rEUPH{;hvMpfw(Tea#>tM&$4_+FgQQz$jQPof)#?w?PFxL2p zc3O)K6V2Rj(zCjZ$e!DJc_A;*ARsS5fUP784uSEnM;D*#!AUp-VCSF$0l@_PXztcb zM&<^N&Q46fXS!JbJi7i8_FuyOOW1!2`#+4Ze}BCG68677_QqBRT(8&Q|BkTP{~_$x znXc!K&h|hF8s-{}fh782t{+4cGAge#U7uZXFJ5oeRt<%U&|ua z0K#jCaOxmR7Q$36b~az4r*q_rKw)NT?ewn{*-&9U3nVzHPNY-9HD4f|@Zw72^4TTU z*f&s78@FqAfUpu>n!L=B&NeLf#_#)EQZg1rdq1Jdhv5)j<6 zquuOu#gGc24JS%b-~FgHSusD6gr~mh#ZjDEP3uZfQ`kuZc)eR}w9z=(U-ja+;bzVL zeY3yVRt7%vXHJA?o#8Agd%lH(;*(P%3QWlA0jpuW!?fu+4?BzGuVY@ph$IN5$Kd)M zUz`8Bgm`3Los9W7V8O`_Ug$j>aASn4@0QkP`fXmFe%1n+)uFox@QXKWS*Fc+&R`(H z7#X2yORfGODko|?P6ki@LMlni5|2%W5mgdtbwApz|JFkuYSY-!X$UbB2FS0IprA>| zdr${^XgB@{#ud&uOa^Br@}q>YL|*f2Y_#V=_Hu6NP}3V0>hx5oaB%iOqkv?t_Rco) zIVmW5bTh~O1FA%ZecEI}bYAb!1>>wU^QAtoK1;Qn2jGYu;gztv;fu7>(~AMZ_WKuM zQ{kHA083rJq@gswe;*VcNC5=nQr>-lV7x9b)8M0Xx}5We{u*J0ajqDJf_E!NNu%S@W-LiZW~z)dYFu=6giM4L{XzCN!Vi1 z%c)f|Ddf}at$d=CEP+xoDhXC7WJi(w68q@?TpSl)pEOgpBh~!`#$g8jC$z3jrNAC+ zYF)^Sg43E0`)>1<9+fX0?fAQGymo{fGYf3zaLbJ~TVp-OTR<(|d4b@l zNZouHe}+#S(Yk&?5`yst3*r0Vb$~Xt7ua_3FO@Rz%EZ$>)##LHAAZn-JNvtv9>2J} z5$-$!yeVb{gGKy4p8NAq^_B{PxzAJOA^Mey`kPy4s&{AIxo2t~5yi|vX00FCwOy3| zvbF}8rr@N{RA`mojK*D=Py!=e1seM{#d)`y)@Ts+w?kRWENwBb*BOXF3scg 
z89n26m-qO@vT((NO2N};8r%zEkWU`25k9h-7~7RJjapsO8A_#Nf^sb+6B(iq%OWMw zQfPW&2mL%rEHZ#$Dd&Z5$pAUuk>Np}zauTCKPodg>m;5x7q|k4vHylN5eV9r^m+we z9+eJ0_;hgU#^2Dgso|LczGkV&mpQX`$fwpYWBKdYXX)_x{r)UUQ~-8uE?quP{W6T!M(2=UA73RZ}72E>fXgTWtY8m6cPd$TBEoG`>wl~3Ogx2Y(Lm19;!H0axJF@Z=&WUHcicHUB}Bkbe5Gly z=BUqctT)>>sl!zn>A&I$RX>O5%+Mb)x;ih4CHq@dTX@g z-Rl*2j@f%FEeTMon`)B={PNwOwMt&;p_~)@3y5>B4-Hu^Td_SSu`tZD8}vNC6qX^}(tM%WCe_wba$ucl_Y=w7zuAr=rg8 z*GYRzV8bE!INUpBzi|@;t-Dk_C7+RXr{rZ+ZpCQQ^ePx)K;kL$t|QA$M2GVs+UvKq#JdcRjgirv6}KFv;aUOA|W| zMqkovc;YW!VwD_0Cj(Ct^b-iJdsN>5=in}d6Xn5gMBjttA0W=W$4sL=8*$TmXcizY z4SL<{s*wTqy1Zd(KBr)snD%aXpqcJ7%2YB{XXMVqzY4{p-c$)_veLdV0--h96jP)D z7ws9r_$-Fo>p8gmGkTR$-b)xD6z|qZ&&eiw6^eiP7SIq#AzWO=Tm98o0+s4ZFw2`$ z<>APLFpGhXE%s&_A_kxTB5G}PP+V#8HPza)YW^8a3fb3d@Pi8Fycf{_3sL`hWc?-T zzeN3)sQ(i6e;85!{&@W*>VFaSFTC)S$?U%#2(WQ>|Dn_Hq%Egl&EcF zen4U^F0B@0wiH!3ZGpA{j+aO~)|d*}4dF2FZ)cx7h02wZ z+@`p(a29$|;*pm6Zns^1J!zFqI7WLEdV}Okj~)`4L!^0P z2*2EK>$mUrOQ(ewnCsexg6yEwU>IPvsamwX)a40gAu|rhm963`3r&;6ellD?Fbz|V z|I`Di$lZ1EQ^Ko;A~i-gPhVtniLHQTsF&1Soahs}+L@8+8C#FvDZP#qs9pbMp+x9+&RKptJ!)8Q*@2mzdQUxd1Wla5LO?0P0VR7X)CJyMcT+sj zwoPv1wKKn%wL54X{Ur%w5yC0@s&3Y90}X4xzz?fVdyH@De3;RBw3N zP`IE|^rPtrmA{mwE4BACt56*!B?Xw|>IZ1hWDB`?$Eq$uEhd_z`CANk?a(28lF|fO z+thH;C3VFQAYuB#uP=%~h6O3@Rns$<&`ZK^^P$8T#Ce;KACtTwk5~cJ#f#g+v&28X zTa2$Cfg(RI!{74M9D6TRux-)Tq^W-0mFKls?VOI2d5LnwTv3{sYBl-F1ppyhP@FudoDhcAwr;%T9HQncDX=oAmj=gRIKX_j46 z@y1g-11MIL2G_QaYum-#GE5YJq4*-<6n6Rt=GZy z(u;#r0LAK_wOr>fRpNXDFN>}d&?1dV$5i|sdJ0q!S=6sc-3N0u&eB=NDHH?dMUGH} zLee-T10{8`dsey2^PFiyaMhQyq2QUx!i}R5Vz*?whooK6(eW}21`zcP`y$EGXFhZgjW2`Je#p#jrkyjppz|#z=uXn{VWF&ND4t36+x2`BY-P#uw=xY zR$^_L7^$3k&ukekEgfQW4Q{An|HDB40*QWcsmveb@_j@p2-NPBK98Xcp$)5!1pc=Y z&zEU8ZgOmQJfF{+xe|J`#$6xqp%>}EyXtzrn^J;z zgOyo4*Y1BBEAW4daCM5iXN$LolLX7_X$7yDfg0AR;i*;-Q+Hyo`h9>Y8qSLJ;Mb(TKF;ny6Dqk>-G5)Z-QBr=sy82fj>`hSjQSB$SNa)#5a~e%1aOwO>36YY3uqXGk8QAy!Bt zMlTappG4Q*RzkOL@)*od`Lsz|2{vFF?m?O)dC{LM2dn#*W7`q9XIeC_&a*re9OFH$ zw)5fK-t@OyJ(GEl)s^IeWqre`t>$1r(5mBy4GihPh63D$?|Svr1fB_xmb=DGoZ163 
z_~x5}BHSu#E}&g2(SK8QurTyItxr3}(Um9RH1w6OZ)vE0A#l45O%Sz1POf*`rLcBf zYtJCm4Q)_p2aL2WA&_BD)ca>VMK9TjE_D$ST&fsQH$G;8^Wp!NnJsGBhFo+|vSg`G zerfNCRnB(AXtLxe9r`N5#bMYWZ?EMw40xxxE3U2>6t6qKEpC|Fqu&}M8MA#Q6Nx2C zgXM~^hWIJKI{A$%o@xQMx%kmne zP|&7Zy16PY1OvZkMYz<@RIkmsk)h*8V799!D9>I_gTa~u8j%S+*Ul6LU zqntaZLnqUyu!Eahg$Rgm$@*0vK0!;hKRN|h4jLb;0)PeQqHqq2;*R1 zs7*0XBU`=(p?`@Lkou%bYap>Y(2?!JdU+ymWh;v?Bb%VH z2FtxiDp<<1Q84Nv(%uwI>8FmDNkZ;xmAp*|7OqB8*5#=B0ta}+DXaKi^clq5qo(EY z#?T&bNNE+=R1&k^kGZ5{+`mmQ)x+|9kBZa7Plt?K(vKuY`}GbVR18Z!)<=h;MSf$~ zxISm^a>=Q&;l83r<@lNWu`QhvS1?q>69!ba!;}$zqc1@Ow%U2Yk-Zy*>I)^V>Xzsy zyZ%ru?sN5wXk_uPGZlApVMj{cy%~9;LJ~^KT!_G3_?K@g4~fa}W)=6tKlc7llh~|( zo?U+#`!8evW$eF<{U65Izdv4o8T((1?dt`NEd?;P(Z6AA&VSv7H>3WS!_1;kG(6w^ zGu0ICmuj|5qyEf!X{Hwvw0Mm`2=IXRt&#ixJ`86B`C}5#cR*2rPY$-n z8vw{=`QMQ3Rn*bO&kFY6kWI*V^}BIB3^cA$HnDjkDxt)x4N(xUA$RYyml$#(6_h7) zc*e5rBs3$qm#aT=nF5XL!hEZ=g1JPos|XMeFn|8JH9d^60Ymee=061ebTG5fTB_ zO_rTs&Tm9GShY-9qD6;T=c8vYE!c6;qcx%OMN%D7TzV^zT$x5wQlSRY2m7O+6 zILWYB+K6%pJ1u0c7{*_{z$A89P`M6>r+6y^6UQZj>>!o$^KD1n7F)iYqDAQ<{~Uy)WxI(7XIk-& z?#eP`JO}1{V;we3-5|S9@+R)Qf6ntRI~$Dp7PHo2r<99Wua=~$R8A2tx!$`fnTtr! 
znxL`H@*fWYP+nJDGDYW%13jgU1Q3edb%xghKJZOe_4?#Gb>xVGcYEm@G^hm*l^00t zcLVliZFKPsUipL_?FhAkjPdtiYz(xuv^&4A!lP<5r1E29>L|Y@x>)6ez%!bp2l$}@ zbJzwE5ddT7X1j8s6IcLq*l`T*oZYB`w2kY?ds(mcaLpB~Xy)a$-&f(5F~KA=YUqjS zx)EuxR4JQ75~{Xz)Cn=T5<|f&lPY0y>?veea5PFfit$5>LM<28PWCoDcV3h^sI>60 zo&?{q7QfOm>YwD&Z^jCs&sd~ZdCJxqH4?f@tKoYCR@2vcVjmzY?m729hOYl0L#R+Q^}Y-1IQs>Esf!3^TkAUQ>`|B}1I;VWt5NX)f*d#u2WeOY5x`sjuptx- z)|IHnD&*J^5}LVm*`LI69xc-SV;Q97w9KE; z+eibBKW#Xuzd$K9oGzA^i-RqUflPFnr1twc*6?I&u|f~ff}FhTGzlaFwXOyJ;30-e zz7We-S^`)4ieL6;54*PW8rZ`&+9acJJBVX6di?G<+68~U>9y?C2*}G=(pLw3pUa*w zrxAR{D7_fpF;og%I?s5+sOK+7cOHGODn;}WmoEM2$~4bAp^FbADNFCAHpN9>nYzQY zv^u}UIT7mr3=W=Q1MV4O67YH#uJ+orp3O|$CvR(+e`9}9RtARm>9hKI7|KM~z)_KA z-Rog^Qrp#I_H;v_&*iyby?^GyX$n#L>OoAMjaZu#i6h~fWt(U~5@nwToOGK>(x&rc zd)p`bHsSK6^M|gsV+~eVjDnqJAr4Do&eSGdCg$!5mgR6ab8b^Xb|fUvdWXFw^p?}x zWnG;5k@Q=0w%uAo*wyTj_*=@i<$n7W9Mp(h=~o-a=;JuVQH0%9;7bjj?c2AlYg-$R zdGbX{JG}Fy=`!%vY7B~CX&bv>X?h5HB|^gwlRhy_w|9;+cBkjtM0(|&YXa@1elJ9N zVChfFXO!H|{!G2&7c^%J=O+4Egmc$P=F06>=r`YxVcLycAN<2WRkLD#$}Szbq_?;A zR1)_l`|0utuOBhzhNS9FRIR($JT|T&9@V}@bmgR;vZI8Vs&{lsc%clx&xPxTKogY! zPmCIP|IOgN8MX5Va@VDx`9wQSQ^kzfIS;T2`USvl)}Oe+&z&?FeVh;hBTQt?I2M^e zi83FDtHTk;KBzEh^bjca_c>>G)Q0fd07? 
zS3L2}#P`dFR2W=aDSDsLt0mo`_Keo+&((H*q|qwn3`WYLPnaQ=C}N$J6KLOOP&@!p zcv{&{RN4nKH3l$ScmE4^AU|0JMMQ0cLn41tcAh_$5(5SAAx5;+#}~pH znVa4f8PjmNK^#Yt_a}H4Bqae>GTEvHoi4c=uuMI2`xesfj*jv|OZXJR}gZNL?7S59UfvqZvBB(vpO2GOGJ2cN=p z5pN~Ai=AN>vUJlA7FYbD3+x|gTY~GO!}h*ncGHj7xvuUSiQw0{E;_7B5c+r>=#({m zZDGCwnD zc>rg7{^o2Q#ZbSf>|9$sz--2<&T(nQgdN(p)A4`Jvj052{&MzT&i>2Ue>wX>~}ij z)myBy=PRd=LGS#EteWBYl%B^f5>()>zxs3e(d4dQuodao1{jM^W)2Ge@4>E4ZTrmY zv{8QTb%<@m)D4V9(*(*e(YVnxRM9yhep30l`Xzbko*xhs=r9yC3d7IV$c>r| z^gOCDE)ldtEp`Qb2in(X{bZ)x#3?zO3=N&0M^N(_rVaZCz+e}zuHbG1FxbUYPp7t2 z+L!nahg_t9rWM2UooADW%K{^Bxn0N}<$@IdSqeHmv271mWOwfw$AO}SGoJxeo;E>1 zoY@AqDUYBqPZkN3*KUdk+#RmhC`=nsN(#z)FixZJk8Vb#8{jk?cL>V#xk?BDP@9Ps zg%EC*(nAmoMQe~Bd`{1b{5mW`Wmyf2E{K}h7PzpLwf=rQk1gzsgmfie2%bqKw>8HTTwdnqs>!Tn|@X1KTSQyB%VP3Orf_o&|_;+~4&GmNR zK$cC$lQydA@~zYsY%m^7T!Rrobmv$$~F4Dqb0bV!MtZvSYQ=3`W1qvVb8+kEj5Hj`a zOxLHUL>8qN{}YKPjjf%F9EZo7T&u^{uwD3(C~mHTlt7J_kDpi-$h+bLglA_<)n@6N z@Q50X9pMOMCXOQ_2CtvLs-6Y#uBskMy$AgfR@zmAi*zqN*8A)SGL#umZ#dhw2Q&Hv zCT<2Tp<+glQ>>a+w0(gkuWMZi;^pdlc;6O^AcP&%^Y$K=q7Feb)u>2Ph!Q-pe=#Ibxoh zl^JE;gpw1bov-xepdb8=*|@?$l+Bs|YXd~t(i)gs2y|5p;#Y4!efo|v(pF3Drh2;B z!=n&P$d}pH>ol>TzDRT!_(`FFB6;@q%Qr;FbTAPM7eWKDCVG(Oi=55_hG~ysS2_lH zV@|Ln;(nE1i9!bIhbx}B=g-=CD+|`OD`tkZZnOM;G5(iSs@*zDG?K+?@b>|_S+A6h zS@DI_{N8VUmh7q;12LAp2p+{ZGX8xZ)-}m9{-H1-%!U?6+S#)(3qaXKzRj`#Wh=c> z_5p~%(I&ouU@%D6XJux9vLPjHI%aTrW(_e8#0oQ9+=I` zB-bb2ftzilkmViMH-F%3wj$gx z3=i&&@Js^doql#%3M;cA2vjt$))yvH9Y5XJK#(g>{pE+0*Ue|m!_%(2G zq^nOzP^W!Zv`N(5rZ8*=DJ;KIM_hK&nyQ$y!ZdYf7Z$$Mu=^Fo;$n?Zur{sRM zdqiMFTe!Bt?)}INPt*w08-rF}ElsM=Mh%~+{G#Lb6w_L}x z`8~4HuxsJQZPMAyYJ^_$eD#t&p&O6ezWs;um^RfEV54!Q)Hau?_g-1u$|>QABl=FH zMXwo+J;WO`a6q+ed(RWSnHII#Dsa4HliO8x<w~l~S7qR45WcM7k4~>ZQg{edCHh(r zE{?`@CYC$nx))O6m+T`lBql}D2GL@cFKi^;4%t`rT#kFHZcT;j!%f*T8#Yn6zB zMNjez&?_roxGg+kT%pxv^%7F!(r&)r+^5(S!9I%dOH-I_lM}K5XdQqROG$w@Jn<>a z`Gt^e^jB^znljlt)`0m?Bu&vrQ4?Ev*!+R5Mg$bP9mr5x0S1^D#c~)JbwqW)g(4CJ z?pJI0B18v&(wCBtvccyey>U{QTH#kd$0!poqTdaYPZ`g8O#%S3SrGVp%jI%heQbDU 
zpm?ojZ^X}I$IrH0-+>hd^K@(Y;Xw1Xb`3-2<5_#Nfa~L_aYeYw!G59XFV3MiB2X#C zNvQAhsVYPyw)PF&Vxfvb>i8%Z5MU27CVUZO9ann$z8&|Ina@PR+|!djxtRON*EH31 z%DZFH^sKL9yUF{*E*QHDp!vFbJLDPLkZ%>eMMJJCMdRpbpQ%AIdpYv;sJc$#7kD!4 z1`i&e8JAlgaSbWCUQln_qIzisL&ZGoL5Dp@MNFGN(FS7}eKZ-KAP7t|CMt||K$dR( zghbI2{_>NZH%Un~XU3#=`Fj-IiDfq*=}dN%$zHjEt&+$?O(oS6Rk%kSSs zq^s)Kr7>f8XVt#w-)CHmg2AbRepf9%c&K~=8BXzoay4#|E-88Ou4j?2cb=L}-g2xM zZzA4ZJY1S)4*=gV)MpQU{sax0lTru1vVGQn+ovf^LXpdWhfB%hMFqBsl_glF$S@wA zyCa0E6jwY>aHDd5SxN4RVcHtw3h zKMC2}&qf41IJj7jNK?GeLDXUV?&yF!%@n?->TI_BL?{g^JIYQ+J;(x0Y&XUA3$s~2 zSt{wGwioZoFXcVWun63+jknYZN9ps(&+$A+xg8pD)Ni-WzFGvZ zu5!jr(+r2VvN{ggB=E|kl-GA97r=erdY|`1(JfL{)ce3jmOH5cOQXkVDybgnN# z1l~d^t%|IQu4mxmVd52-2Wne_uJrhby_6=J{ID$LS%|I>U0nKx{oBmlvwQB#r?r#a zD%bvMewojCpp&2c`{J21BnGc1*SmjliP~jvi^s|Za?8(8uKRBXb`ZX73VIPoe@`Q! zW5gC7U{{Ub$~!Z+e}@q{xnV%GTKO^da6f>On@hIlqtd#Wt2g&(fMSss5wS|QJ(4(O zV0FMcB<4sUjq-NQZi1p&4UMsxMPqtRer(x*1_8#N|09*wR!My~Ouc#gt$FVvI$Xo39gR|Hin>`!;JpoUgE zHO6wrvhmA+!;CK3aEqP}3}sZd^X*JuQ)PFjM{nlT{OQn0x#6h*$Y}^;SA=8d*j(an0OI-7DvD!CiS;Kk40bl!)b+A?p z2PyZz-f>RYvcM`qz)SK6GJJZCiQe-M^kEcUcM55n4teRp7ObNm(czWB zBYw6V-F_O|{alF`Q^tkby;bQN9WD~{)zn)d*2qxtY0-nK>kXWOxTHfiGXa=+zzr@2P zMY?cSDrtH&QK!oo>MsdUBJudI9O|^Zc*4bpd^Y7_v;^R&va_~f;Wmw;n#Kz*wzOin zB4$`U^D=4SFxAnba`qo{-7<_7D3g`iNakN`Ezwx^ON>T!H2Jh{@8SDGyW>gfCs{1r zZS=4_JD&9bZAVj33&z^LAnS#p3Zr8*k~=_I{2=sx{X`x#D7R%3?>+J6^CKSR6r-VA zrtuPoKE^ivHu_mAx~8OUVD3Q@kKl3%SSi}Bf<6I0V`pW<2YuAmqUlgUeZPCosBd_% zn}+l3@V4&BMMhg-u%mW@#OB^3RaY@ca|T|(L(+}w8he95Oq!=c>kDScK4}CkNkJKg zbY#JZnHyWo#jz6Fn<3OQm2T+eMri1=o(FQtp4#HtV(~&BN7%j~7yT|vi)1BjibK4m zNU7=(m`e}#1oX7I7?7v}WmYHO(kAnvSsb_OeQUKnpz5uIaS2?Nr`}Y`cP|W~PxlPj zXuyU_HK0s9h|`_gk%!H*G@%|UuElxrl;f1z3nFL8IG7#TwkU$_?~?I!*7*fY&$hi*^qs(UbGS7NG>+vQbwHdJVODE~`Ytut4 zWt63PEpb?RuAnu*d2?x03JsF_suk{tE(Y53ZILad`y;PjlU&uCaWYFyU__U{_uNTG zpz6UxkeQHT=XBY}yw;jw9w-G~Syao`pg1)jH%$@5dv`Q|EcsBt?rJK3>u4HT%yvZ@ zC~7M$<0&3`oZ5d1+-ehM)gDDmuL=RluN0Jv>LZK0h=dZJW9qm)dSM$&jEziAZ}hMf&?Mh3Asd<4K5gSmIo#}2`$=6?Ihf+ 
zWWry|BX6=DtF`9RB5yMOG@GKPeuUeGEQzi-$Qcz#8A}}k-yAY&yiC0oM@?NU=SRtTjOC|@J}R0lgMX=>TJ?Ni`8@a9 zMKr|Z`D`S!Jj%Ip*1derqE4%BNoSVK49dyKrJ};HO*y=|`gZ-@mYm(E84<3G<}-|o zM&|SRJs`zQN%Os4TB~_0s`r%Fz1Ng0)lwSfVOE*0+gC*FOp+t#>b`lqCmeNN?DQ#cnHH*M5S8yhK8I&f>#3!?#<1F0)B22S6S?68i z!U0+!q|yUqV1kIVAb~Rrso*^#x>`0+@WM^U$^7;H6@DHB0T<*x-=Kp&=mfka5P$kO zUr5=!H4wqLHeX17_^vy>JED3E-JB=^9O%f}+dZ z>&%0}L1=2(l_&S^oFi)_zsdM$M1D6zk$}&R#)UB$U3l^n_fBh5Gp`Mdndkz0tOKz# z2YL4eioPP2aCfU4GFT2Tci0Ei5!wbUvqi;Rvs1BluaHFHhe$J<84c4$2zO>jV7kgQ z1ZtU3Tou7$h@x~kDd$#H#s>1W3d4latJ&5X%%j1t+4MpT8}doLdIMR=Y|p5lzGTQ} zHLuzx7EddS(K}Do0Y5D~R0$Eox&n->V%$^KYxYjW8I;KSHVVaMZEA@Nn-2|N#`F}W z{c8YC>^-5wC&2d;Ss!A9hK~a^AS#_m?-@BpJr%E;scd_fOV#|lCq6g5X@t$(*C6l# zhdi)9%&TOEMBoW3RWMg2y{UZ`)_W;%YnA05sahaR)^3%%vCESbcT(>M- zw7w8^fY)Q%E1j4Rx=J!pJ2ysMxsbKt3?n`K(1U6#4~U<1*A+h$XqWZOM=}pTf25p- z#H-JF6T;pJ^z;7l=zb_NJ}4ay8lh$L7C>xYt-(Z-&J; zz64pj7wnd@hAM~nj@qK%L3+Kso<1S5i>{#PCXP1{?fx(N}A|wx}MDhU(T@n zSa;K*AF=KfPWJ!@A_{mbBM62-+CzrHTW#s>KZ4^0n|Uu(KKdJM;_6@BL}#7}UkJ`- zCgSS1BI@7K=W7-fO2gq->vu$6^)HYNe!#}jMj0tyJ=4IUFDOoX0j8U0<_`+(EIZ@m z2*Q_rKpn{(uq3g056+~lNozHQAFzkU$<@ZXZb?n1)GStjLNs#8P&$Qi8(fvreN)0?`C|NyL`#P`uz=pXIksrOp4a! 
zGk`}MiMv)AK7?Al+6hC-VFK!($APAi%`l%fEt10rbA?4B<-Sl@YJ@~BX>i==G*e`& zju5_-*e*Q(Tv%1qGWVboAtlY;|3+isNn7-G9%xO+NSJRU-0XBYqPtKjTFTvW+8`b` z%`s3)+`OI3_9>Amp4n1<<_QXU+4|03DYIh#b!2b8bFO{mhtSfVYQ6h9g?y%fUY91J zuKJL$Yg2N$SF$m75@rSqCC~)tP9R={4a~K*O)iupb6OJ&571+^LNPwXi#1kN8o`V~ zwHHVzDkQzWIz?UGS78LhW&?;u@m;a45#30-DF}uxJ17`0pPxibRw31o$Efspr&ma} zt~7AZtQfvhA?RWc;6gnnK}@VYfEc5S^@uj_x+pR=&~a|ey0bj0LK2e3$ns3S_*2yxe5$&1G2Eu-bsGQM^BD3v4(QnG2lk zzE9xwnp&!^%OKvgYsq4Q>*=k9-s0%|ewBE%Qq;W^@v`M=TcA7edW=FCvWic$#aXG`w?-vGZJeW@GA*Kf z%P65d4`h4}xkgi%u0CWAJL1bS4?9?q2o;z4zMMn{s`GFI%jxJG)R^j2IuQO=Cr+>?P>Bs2FWJcefcj_eH~a zVw&7|Vw|&~3U$rTy;rNp=q;8zpw|oe1Jqp3`&AOV3qsXhaVKWRU4G=P0#<@Gjn}wU zFvzBwFwaZ$fs{Ck#KJWrdE)v7w7f5p+>~_1HDD`%6xT6I+uxh$qT{23LNpMdSzoLp z^(qBe9)h!-B{u1IE^eg}(-5W$P2QjfuP4Q9 zzlIMZRU2ob1;M;uB`J(HgQ{VaF$u&ixa_eux*h`B+8+Yizay?GyBSJ{@|sr{-W5{L zm!u4{YnFsQrVfb)C-z>?iIFTNFZ*zda_yB~{JSEb2)Jg}dAb#o`|wT?m~pStf>x_B*Eh{@(5i0(8S+u5 z@Wlr@3j&YZ3)trZ@4%CdGEe#Fod#&vr;GfY+HrXW*ufdouhc`?1jo4Pwgu$ zF7cz+jn1Sr%v7@Cui9*iG~RB00a_oe-Gx{XC|zz$Dlu*(Xusd_Gpb=sgAg06z3nnI zVvw~vPh`-aYa!aXy8xMcN;-|x@bWuw~NdMoSnU~lO zmV}u#@HE$%<0N4?F7LkrY%VhCpeZ8CWcM)>s8MB8N?B8+elx{lzD++kc9F{XeOhh9 z!DN)0(86%>+u+!3%(W-86d^9teZKGuqGChH{uM|hi7>M4-def!+ic>ziTbkbIL{X+ z^Oh6lPK~#5I?C#!xg9L1yJ|X6L!_lMSg{Xfzbe!lvr&2VstQEQYlXC6^_onsWOu|t zjhdZ$HU*>&;OWO5D-;V`e#!~8HRXL&f0R-K+v=gW5<2~FB{)8}X|WWsCKsZ6s{_j+ zNW0rk`^RVUSvPPcurbDb&a=2!Zr&*zvawsy9GOZ;niy4^hwZqP242o6UvV+f2 zUbc(?UB0Z{2K9BwugXj{0vnR1;G`Z)uyFzvzZ3?Awp<9wSmfPgVooR$cz=!@WCOBh zu$76iS;bU4k$(B!=x%d;zey~3mzpcL9j3ndoczw&8GLO{&lSZmaDi|HlPZ31GwzW! 
z@!YdRj23Fo=Fd6jdX|mr`U^7*g>nyP}US_Mb~6S#iNH>xazuMIOq$L&3nJ$(<-wdLO7X zQ{ms&AvCigoCMgkv*iwTqNL+$%d3A2Q3xBX#?dt~h-{B&^iI;xa+Kp8565bw^vRcZ z{p6RlA-P+rI`PI2`RI17X0Kl{M#p9O-B%Z?P^VWXqei!oG@J%X|xLQ~yZ8Dn4<%7E2>GCdEyeDmaBpb9gQW8t0ReO~>?NQB3 z5aB%Q2fA|3od|Z>2UZ|JD?ZI4hn*YD8~n~RD+;__>ev?zJUq7xwrk{$T_>B)&7N!0 z?~a`fD?sSY-sJ^jR6#2vjb`BHAR5}O=4Z}=1;8d=gLsSuh2X3pCtDXH1@5?3z&hq+ zLe)W?nC-J+2xyVMHjfWt?4FH-wUks*-~!jLgQ&FwY;q`RR>9rukPLxNjp5kN`zG#^ z`^emV`s`{4b2V`Xu%v4yK#mpqTZo<;Vd;Yaw{K%7dCEOupx;sT!%~fgk8BVa=jfof z))~3TEFPiPl0Qd837eP#B^nJsFvpe@V> z<{O*{=Ry;wtgwEd9u63SAG4BYWCoPaU7gEaRX?a9=D@9u$Lnbh9;kkB_5-^ROs>p; zyu6UE=IJd;;sSN+9SFRY$0nM`hyo~p)vefCP;3Oj7^N6ft6zpZF`q_-_}eb}*p#W; z0J_g}SGE1YrB1$KtOY@4FG=#^1xD72pBl7?cjlPA@-U6*m_1M-57p*t_qXTvS^Z5}BJ$iCs>c-5xEyCE;Hj3&I`4qs(ZZms7+e0nXfC)FkYrDl7d z5NH{uLfaN3RX31x$IbuV>EyrZ+5fT0|6`N? z$0q-eP5!^kCjXwi{$rE>VUu?rHUKdLZ1Tsyw#mP*EJ#GeZs}QjIaz9HYw`pv2ySLIUk^Efm~qG>KUB2ES7F5 zgw?5qRS55B)dOyYK3t7~_SZr!-K{bTr3;esvcs3I=phYNO8r;VYRho2N*#& zP1zf}-Fo&d_NkOP4tvqvcI*Y3N>Q7vO-gPh4JfW_&UwhL*}9#AXV~n13+u~5XNz`% zneY?9j!y7|kjdl2>&<=qzOseu5t$!z6~}>Taz@rQl!T0`%VSi`Mg;gmTkXS7TFJv^ ztZIwq4yreb^6LAynXvg>`-9VA2=lemV z+`>73!#R8Y7#Zvaq#>Y(nSH#zPqr8IvIYGVypyXZ0hTRo0lbFB_q_r*?6cHw>JL0) z^vIeM28tJ4u$B=S$*$+MbT3E8lper;W|V*AJ8tfRfAQj_+s}=1w%?3$4g1YT)JGYm zo6e$Ub8h-PC_fpcWrb=(m*(k}&4YnFavm*4u*C>6c>#Irw*@;i6hpw(A(X@vLqMIu zxW)=e`J;mZ-h)JtKKQu1nwpxQ+aFG>vB(-I$GVBQPmrWe$tdCD>@ak2Zn-@`C|cB@~K zg+E|3#b^cBl#Np1JDZnnv%>i^4#kO8fDK)V8Y_Ffq$iw{yH+A8%B#v^*w`?U~1EN&s@r>~`qmLf2OjD4!Ye(PY$WO}5Zxg;EuIEYv%?lHrcldZ6C3vu%RFU%QKu*1KWi=-d z^FAQck@t4~G~$(hPj*P=g^iEFlvKDqMmxZ@zP5LpOZnW%qxEUjb@1r*_H)1G1#bS3 z7r&x{h#R#~G5@3o%;TUXqZ#cV7KDF$hvP$NRT6 zM9>4fdM`s?al!ccB2x#+EAu(>z6dJPw?vzVvlq;G?$HD#fM%+NK9DL2{dDJ}% zaP4HI=GoJ|^a^;{w&=jqD`|9W_YG}D5%x@D9 z=i2WnfjmS2&UI%AE_0WbXw~*r&_m2&?x$24$bmW>uE>2Qk%7=?jD7)fQPQaw_rIxIwuS=q?Yyd=Fi&A$E+G^@}^ zjC(Dv5^aSHdBuQs1$jE5p#|~yviz0JC$n&Fzp8;{!b}+!9NS!U@O{Ak=S8JSz2Pm7 z-a*(!oL&4(riuhNXy4skg8T|@?9>vb>Ip@K+n|GeOs05T@3gLSlkn4g8Y7uuA6nC!g 
zAnTzG^(i49!SE^g>w)M`_j!}@jyj$n-XXJ-6I5IeFVe(`Re37O9>tUi(DQX7pL_R+ z_KVrnin_n37WauMiO{1rCTV^%hkL6U+|8Ox5Q{rlpkH%ww$N&PMfd5|Bcu;ZbL-kQEl&=h47 zCooIVzMk*5EkYZ~wbnvw?ssxJ#6cs$Y}qFA9hOK}46y7H6J*|w&pYh8Cm=fX-Q2Ue zeWNhLU;cr}a|H9e-(gF-;Kb0Jo8y7W(3}*44Hc;-)8{xn+-E21q)~eLepC6B-Gh%6zE7}dG!XW#}RUNvBd<{o3nV+%mO_emgYs-lTt1G`C&fG^;o7Ft3X?%VPVa!TJ~6L zG!dt6E)^`7;P{MpUtJkTI_IN&;EKeerhB!fX;CEA>> zD`Hl;j1anNc_}w)*=Kk@N4hj>d9qVcXDX1!yuSEkv9|Z9KV4$19v(T*k?-vZ(;KpP zYE|wqNgyuD-jPR&=NPlR`kAZAE@B3Q%Uar|b*Zc_2DpS+jt3QAJQl)FlG?D%2pV)> zeq)CA>desLYboF)Be9ZJf1|F|t0j-YPUxoG5B1k|<5%apQh0P;c{lg6k$58h3=&NH zP5I5AkWn0v_{Zr6Fy+3wUN^J^IS37+vF;wt-654WX5Ey6?0T$qX_@R(-C~I<4D1^3 zA4AtV+>%zJC@_S=Y017aLxwJWIg|5BRlq&UxMy$L9>%U&Ye3Lygw)_tP>M|47pJ?S zuUsp2LrK2bH-}fekfJ{9`YEV-0OQj|?@ShT+A>yM@&3o4#EAh(d6j+0UZaD8isq9= zSsb>cd<5(f+;poh?ZCbhzXa`X|KS|`;z+kKa#9~kq)UDD%4MgI7M3?6&$erzr3KViz5hn-BuTvqJ-y{oL z$u181F|JL6Kj$q8(JC1S{{iGa8%i7@{~V&R1?9v?eLUHL6@#b;Sue3Th#oMQl2_QujDEYh~mzzlIg^Ykt-1Q0J#+ zbE=)2T5>30;@`@FTqLYgOSPu9*0Qd@A}^oP9QzPyzKE;4!s<^>Hcpso)iquYwO~=M z#l@1#3SO})yV9PERqvRzB3i#aIy%YJ;FhJ$_KI+lpIKvaR%0qeZ^JRpq!ATxJ+4;y zsSu&e0vi?Fn=EJQSjHeU$3PS=%0rrmkGapA(Cw3N26I1s_@IM>Nz$xfk~WuN=IAF7 zli8Od%-SJFd8AT^6)F(v?12V(RDdN`M4wa7SU5|b`YNJsAEb-1oOv9-wV##Pn{|?+ zf?)i8X06^yGTj`@i@4(k8We*DlyrTs8kv4o7Qa1=Tn;ds^@ZN+#1oSqjEghUYFon% zZt0`!$OySt*qe%Z!WcRq=z<;vxf;%u<=U)^15^A%8N#zRN4u8j5X$hH0^;lX>`Im` zRTq7ky71xd*nA3;8BVR5KX`RJLOkKzJ#%;JETP4;VWEHS%w=`}mbk=1mnxy6q?KRL zofqG%7)#|F6{aqEslqy6Z0A|D8aG`AM9~CcAxdGyZ7KoenYUabW_xzj**`_t-}wqY zUE!@`^K44Zp~lV7&D)zQh0xpTLL5%{>c|$JdbI}C!-WEav^9uj(H@{`l@XvwYg^jv z9iWAT-+I^{g-r-fE}}Y#t%F6 z!*}jR&KVwVJr$qbHYzRq@SrjuKKG=3^eZI9eY>}N`mkTk0F3+On~xCGQwQP_@*!zr z{{h(xmpg!m$`~xJB+Pkx;t4J4^02$bUt_f6q2;2{MWz8xpy8<7&w3lEO41E_t-1v! 
zJ(LHLTNUea2pA97wZiq7i0oDy-$QIjUb5$|+*@VgkxCF7X6=>4+TNNLQQ%sks5HuZ zcGj#NanH^xyHiwbv^+1+XPzPK*{e4ZHVchstS_$$pLpV~3@|>m22PNqG@TI>y~!XI zzwE&do$OVD(qAC;k~E~lb!5!#6p1szRF;8il*ohP1X{Oe7tpJ|gFcDLp|~U8$c(K& z7blvNnSYnuJC%JbA=#anboKGl^qIxnjdRy#NaE$qhSfIFK?+Ck(!wJgc0iYz1~V!4 z7$i1n@Y{1(ctcQIyHi&0;5GEc>n|&DHOYJQSlK4%*4C3P`2r0ikR#cc6gVS;4D27& z6IwGw-0pUmyBE|kj(E$VmCxoN&hx3)PR_UG(KI^yN*S>oAoP(}V6x=pJnlAMCAL2K zBJh&)3_Q;d%#d)L9QYjCwk~l&HJSqn`z-Zz660o6KfoA(sdhbLcaRCjLOW$12*KxC z4w)n)!Uf*a+9^T31lxL$##wX%g`YQ`KHkm&=^MDK3@^#-yI)4oyYa+EMTzP5aK$#R zQMl`B83G4OF%Xsw`H*ZAarZKa<1T==y}K{pqDAUcot8FJqxoAe>XfdtAjT~=c3(5dLbuBF&cK;%AVc4zFFOUwAaQ##MO zcR)ojwD%Hun@o4Exh0i|uFae^1gA$xZ(NR1Kl^J zllt3pS?rj34=t?EseilwRGag34@RyujltyM5|@t&MX1{AR}vcY4OuRvl=&Y;Lf)*7@S=$U0xTO%nsOrkaU5UqA#R_dL9)LDJ_~B65Wf?s^{UZUcfw2-< zFPd=dgDLDilF0zOQQp$NLQ2NRTG%%n&nJKdPz6ma+bkf>U<$|nfz=4FWCt87(w>bA z+oN%_;+~UZxB8(`!Z>C}$fpy5`uyhJVd?lOs~FKXSS>x*4iU%cvSo`VZUoLMO=xbH zdxc*SnxT2`wg2>nu2YI-U1h*ewn2V@4gDY3^v26c^w1&>tbuh1484snfAzZBnuZ+q zC0}7Bu|%bnYHIHmbG^;!EZt7O;7h&ecUwYxWNs^SvwOBoxpTOR~@8P7dPm|Q}8 z9c7MV5f%AG7mEPhe7y}u9cN{3HWvxW7B;5^zvfFe%2NClLT`t3<%$Q4;B6@ZctHP>vfr=u{f)A5bTWXqCD4WFNni_yLy9;+HlEt&B@`N| zgW-mRJ~5lfjogy=M1H=XV!3MbBKQ5u?yYszrz@>maJ;e}#Zg1b*PYXt;ZEeMDl#lC zuc7m%_QnbF^kMiYYYt2MC>&GopaT~x#hCjkuY1>+I%n4T^Z2txOGz~`XX8Z;L>>xN!ui*5@BX-@dEVlf z3tKdJfJ|F0TWA(1F%7%=+SeQ8br3X4Np33p(!_RfpF>QI1V6K7`zd98hpPdyuK{z# zCzICty}_o6x>}eN5)j~Gx~D3|2=s81A}5ut#w_qm>_x;^k0^cN4C&{aL65v`H9Asn zDDQu2eV7&Fd64`&Fn>RX`gfSeqyaDooQk&kZ*w@SdjVk9enEz;nVn97VuMUE#9sYw zx}7U630fZ2Zh!1%sV2+BnOVoBT$L|m+<=^fJQn!F*_>YDa6{i_U-`>eSoInA07Yy3 zVK-+l=C{6ch&*9+hq=0_5yfSpUpdMFhfSK~TyWv5h~u#fw8ABxNKGX}UonbVJKmKO z)WjAHT9FG_)8wmKx}Q9B)YC|)bhgoOv-7e~^Dpaie=(sn;|EI+&%ZIT>u8jZpuDzA z-6u2q`Z)%_6&OjN4{4T93h$_Lwxy>7pQJFLZ1G`|-O}qUi0JizrDxTM>-J>)t<|RK zD|#7l!D+yO7-npmn%sOwOGZ&GW}k=Tn5w4MJU)BKdqK(lK@ z{}!FU@BRG=om;4K_>08QdBZ?q64>a^iK{l!3GYddJ9ek#`AS|VS=Uc^-F|7)pk&FA zm_y9DP7W(h4*~sf)vuos)-by{TQ2(AjB&{=K+%#Q%*ffT$`z{6|8bq7w12Us61=~GJ2Gz1I@$B0;pA)A`ehRl~8cAx7!l@^gt 
zJiklMDRXDf=jv4#itMH(8um!26N7n^|y2VF-E!61(+iZVYi<#~6C z_UxCQiBhe1>0h((^chY}FlzD6KI?obhvZbt!VLWz&hh}9i!rFtWMnOVpfgTi4NLlY z&u?^&1kf3f7yfz6tFB4*(Z2=rcklS8Fw67(Z-H6!d)6@C&D-}E@;ieTghdrm6k&3z z?TG!KhB;fjn(g0$`TMGdpTNvPg{)X4{R5lh{$z9MA8byene%*ChA+4)7al4p1gz?Q z_+f6t>WeEAiWrgqoFgt@IGkYBAT`Yn%_QcANR^#{q$*f08r+m^p(5q0J*fn*7hB@n z0yZHH@0I<@G!*<(3-%+SJD3EWvs1Q|!()~$VkucbT%L56RT3%ISKr}r;b)k>x1`N_ zU`S$o4?fL5{HXSc#+sNA#wpdW1OV$r;Vy5F5cOW*$fQ zWxQ6p&_Oy4ofCR(wyQ1AdG@8C(RiTPj~LIY_16?$V6F&L_S4rENYD3 z>5X}1FD_NW{}r3H)-ev@0m&KrS2gf^zxx}^v47S;M-JyDARzN;u$b3fz(!G$9%5yz zyMx>%H){^>`6o+|VK(;{Pft8;>Q*DUm-Qv7Wr1K41%f_;9u`r2wz_n60#VZ!n7iXu z?ZibX8yN|og>mn>AiK5}Pid4T&o;?Csg;(B&J`n<6wRq6hzc@_$jFprNQS?rpQvcoTc`@a4&?e{dEnfq`sAYu%?fE;nKcG z=_-ydxA)ctPxKM7^;J|**5%HTSm)9tc9As>7i<0HFPIauD9{_<#WT2@U{GqJP;1)Be%wmHmmQ z|27WCz7L}LbUwb2AK0)a=6#&1-FU30A|MI#2WBe~qxH|!bhO;A+SlRvFYpa>>k~^T zi2|ACUM?tI0Pu7m5qjf49uOis1Y+1!QdyZqg9v+G`01Vura-sCP=;~EBEt$HcZgit zr>vGPGRRdjBQpX=62e{XWdr-8@!;10GAxy$kBxyZ(|URVt?)qoHQL;iSA>tFUSMXN zh~E{opd~HCQXYI!oOp)Cu7g^UOSHUBd-cNv#oL=Szk&7L_C=2!L#^5YRTo^sJ`8Xf zohEzC7_o`aiD_rAMo+)-04x6i;6}>X8I1Pgi7C4>gtzdZQpp+n@$!cD=T>rNM_R#sKD-1?$j2}ag>XrK`T|Y&#R$AwfUd>HqV?p zQoaM~S>KbanT-tnH-fHz0~F$Sf_4L;l$9KoZvR10S<=Kw2ANy+!h(S*BGflTa!ZIJ zJ@oz3G6k5D!S1oU4eU?GyiVPt<{JEE$H3KNdU0dkH3G}c;PdalV8q^aLvsm3-h-2f2I(d55xdXZcp8Tnx1UfJUV&NRyO2q@=WT!ap7uD1E ztQH$|@Z@{e(;!9k6E;c@V~E#K3>K2a`Uc{$RtUZ<&to^9HXI9gCkEhoFcgX*Xiysz zh*e#^rZ^DU;-YR9852TeYgEuqUxqh$DzO)B9MKB1?$ItoohOXLvXK0;9dhuT})b~7_rx(E8azNPsK1KX-*vfBX z@9&R);@KKA0o_dt8+___Dr?`c;*5yYq9wSXbOpr!?y;+!N(%M%22D^FvDFP)Z1DDe zu%cNew^8nMml|BtC~pi?1*T=;x(9n1Gd^pMm91kE<7lsJ;z-|#C}F>2@8f4RqA(*k z&j_#5muZ=A4EH$~Yt6KhX$zPghjW{BMM?&%>!43zNqLfUtQiBE$#~O^v*NsKw3gw6 zZpJl9s?Q|?=(&QDeK6R=u<|^l@6@1E4Ea272cLIiJ2*cM#E<-Zl=6A?$#)t6LNovb zyx;QP_X9xwHu-;l_#r}Ly2Js53IYItxMMwY#3#tS;*Wo%G!Fr}UkjIJ*e!{8e^Y>7 z6)qCZ(DUrc$kAd@&6j70gin#DQH8xkDU(PMff5|!n_8tQSKm58sCx6!HI47v++vu?GWyXP+s zs*{Y{^$I_y1QaZ}iKiBj-9?&@W|ll|G`7nUc~x5U3OjHb)O~KqNUlHyeQa2M2)-Al z&I!x*=(Px`V)bTZ^}zFV;yz 
z*_P>(*r;uEOvuKjs&a{*PVcAIU17Af$ig>Gvn|wRUs6R=kAt_5pUTi==jmckIS_$R zKw$?D1ouF=Q8E#QOXQFfhxz6wT$R)<1OkiqgzC3Rrh_E0@2HW~qNj`9THTjj+JM7y z6tG8vUzi>7Wy~v%xg34AG+r-eO!D6`vv({mPrh9E70$>=GaeD zN4#1)IpO)_qQ0p9pn820*~0?(um8C$!d0Gi{#!Bf=cN7jF_NJmXqbVHL6<24!JNX5yEQ6Ov@!k>Ie*lSL)kS=~cvuCX3>MF5n4x zl5(?H{7Sp{Q7+$OWacW!d0bQYdyL?GixKtTVbRQej2@b6&J)>r$*dR?v5b^Nwg-54QRo*1A7e(TLrW4!v3BIA zVR9By{9=Pp`rI+uAQTRgNhJHdQbTRm>SdC>8M4=CXWb@aCVFr`I*HAC48V^Yqv1*z zu;EAN)=<b^Nqf zvbPa#`6V9T?|=TS?fiQ@(J-> zv6-}<+>1W@Nk6Q9264Fp)Xpm?{UPi4eie(|)gf&t9}j655K!Ry*raAL7j5eemWEzv zidw~$_SY~!OAy^FS608|!Vmh;KX3uCNZ|igY}jseYzYE*UR1Hzl6EWn~ zGxH;iKh#9!>y(Yyu;UqLTv+XG=ZpoPl=qgD7~Pc;q`n&4_(+Z6&FQz&hzD-IzVf$LFRa;ldxPgC_1(-sZgku_t5QQj(_BBB${+W?K zQ{q@sl~{}=vY}%&aRx0tUmO%6a;oUqLItmpzuTHaxyq*)t!|S5Y~6lSp9pXa7pxdj z=7_Ys*KY%+pr)lUcER_W78#_^QpHWiOblhd=z;DRhCY2PACn@R9@~@E!S!@VO1+2xY9{#=ISRB_p~%G(A8ct%hbFI-^$Upd$0_UNS}Ix_nRA$HpVuiNNP| zlt2_?7Xf`f8ZP#h=6sXhEiKQsJf=og=LWNBEH^Gc#bdMm^Q&Lt@q_C12Ohr_FaZ|f z3|w%3?Uoau2*8^2dLv4P)P^hvOo>H15wVB0+uY54wAQaVlu@}s6=r;LwVaKtW_OM4 z$C-R$bau3vgl4LupFi?&W7BL&6%(XrAuEx|F^=Zvp$k?qfDyuxgk96y!xwy{lSz(v zWJFnSw?g=D7O=tsG|-3TmB-+FX$p&QJPPa81#f_H^Uqz)HKDZPFY)+6p87{T$o_#J zN&n1`f5xNjIO>Ojev*|XefnR-!)P*w22jwg!~p#GtAcK!#;KG#gn|m^yfRc(3I0>yDc)0STN^3+T4}ZtumuAdHd!m#wU-JSfx{Gl>Ot>EZW_z;Uuw zzhVB$5oTh;wiyg}?`v$*fhDE-73^nW2s5>a_l+^OM>0zdC%99B)i* zj-Gk1O<>FDkd??7LM2coS3m?V&R|?vu5Vs^Jp&al8OaS#nCcD#E?j>)xeJ<_MT;YT zDHj>n*DY`1HmCmJHzXzk=FlS9J2cCDQ5Jrw!jj8<^_aV}28-b=>4Zyl=EmvMT$Z0= zvq0wh=6$hqW?X?8v{x=xO`&-QYX!~A(6vLXswwOVRjNBHl77c>g1wII9iA-~?M3qk z)9aJiznHchM*OaS4akq<=kJq7ARtkD+NJkkgnnFI)_X`j8KYuCL;|DwNWaM(kYaRlJ@t$gA$UG#e_;U$!coW&f$_ja$fmY1+o5oV&J;5( z$tcV-OZ8hT>OU%8qKP_W5ZY*H7Pe#@U{HqfS`KYGRTr4_9vSlwCIT7_d!JqPgk_iigqaAyQ24JQcWM%p(~Oh-(XXt)j|l1+ng#~?7qL=UPnq$ zlq3X~Hq;~by|ndqanJXu3p6}eQ_Ll*`tUI|s}`+6R&bwH!TepQMDJ2R{b9rjh{<$t zJI}#I8c2I2z3(+dUh4jp) z7m!-+Ajs3@ww$~P%~pcQBF+JO%kk2aD1^CIk88Ih12ZaN2PkU5Mlc13e0n#GL7W<{ 
z9j%|Z{NfHS_Ffy+e#q%GYiwg{*LMT}0wtuFYX|^DJ+l_AL)U&%2GeQiUjy=E|NHk0@po_$D`l-kX1hwmEjFmti4pUmqLJMtj4$U z@_q(}lng`U3VWzi9C%g$pbx0zEpzxSE8%~7R6+4aefa+`xX z1~ZxVT;@J}_?|tz;Wd{~zLT3-yu_e^#AB;d81SPtkzPI1{hemXGXIUMu-8j!#1RDY9z|93TJCyM`W zV`gGjU;1l6e(Y5L(kR*dZj{7*H%fkFh`y|f;duhAqZABud@i8Mbk2Wy+y_f|nI`;- z^t~@CSX9qu&pL=d6~HK&02n2q0Hfrgq^=PxH!+C5z&#}-m&$2N&E$~US$L)4xaRlP z9SiR4yHTPEFiQ4t07(@~L~iC=m1(h-nYFIcTqQ;HPGZu{SIexzumv!r{oiGXZAPWw zuL1eXPE{E(R_Yd?@^41kn-p3UG=P!zXW7M$0+d~fYFBaa<^r(5p{czY%g>wk?ylg( zeFanC;(&dwzMFuu>)NCHEo5qn#KpdaOou6zb(d-W`-o!gHyfrBIBs)`%`zOhC(Act zf2%U!cbqqXPPO7&*^Lr2C6CN4GD~e;{vI+7s??P)L)J#HK1@?isKNru?xO*KAx|2s zkHHW96dI9u<*QuwKX=mRH0<(z4ai@1s%S|6*8u6NG?n-@Ab;7ZqP_gT21rvK!~0(Y z@?)p^_c?@BoNYI#yQ}>88Ym!_r~n{KIxqsv)<1IyrzcpYbwZyV<0KOmX;_j7x(Nv0 z{N)V8pH-$1P-X6f|GvtY{&STf8PB_~=i2+N%1{HU4CY(e`M<0(l}BtH?|!H<0)K?e z_`eRB3O;h_Ujy=&ovQPT0O~fN{XC6zS07Tqy?{hfM|*;6nT~#$7?@Uv&+R(g*d70r zR0p=N&S1sEq;laBCOg9bW%udiET}69E3VH(E-|jRQ(oL_O8t}Hl(+z4$hoo9qzzlHXTkjFRM5@$d@u;`G-8zH& znLt#EQ1Z|UXzIUdasP6K#-Ah!h#vc9tOa~qpa3w|04rXk+>zd^OhKqlA0yO@4GK>+ zuzVKcJ7YY8zaNuc;Y?EdM0T;25hRa>Kd81QWj$%jhR4Ezm_F&m`c?!hvv)H2x`cZw zf(-AR6$~w9L$xPaeu1LTIS)+jD=#%iTQp1oXFnfUtqQiNl2Db>c|6cgIN6f4fMpAE z?-xw_3-9{V`=RNsDpG~gO6Q%Qs$;d>&);3v zf2ofBc2CRiHtUb!kzhzF@<(_8RQrDs9v}Y*54dmP0hI6(MP!pLE+`RH8c<@-BU{P- zwiY1>uol6O)cL(`-nw9w_M7pp1oj=w_^K@9H%H4in7>t&<_N3{BLEwIT#Ep5!N}I~ zt;7noBx`CfI(X5P76Qbef}AeES_A|sE6BU=Y{>Y|hEIQ0Q7#~FDZ~M6_yqVO0ek`E zLR)(?Yeyq{1tUkt?{_5q!!sSVzL~YYZZ<4!HEqob5Z3xbhi^u#Z|pj%3bzP!%#KNj zuD+ozaNHY{YyDhJW&q9kFbbipuN?+{oeSzELea)_Rl85fBogq|4+Ug z<1b%M{%hzk{2MPv|2sNrctMIm$oP^*q&bt^G0wt8nE0tsAx6Ft&%8d?j6X$(o+=74 z9uT(ZfPNb31tgH6je)GajjaQXfsMVQ1&=|-fOdjVOpSVcFx8kEx9Y2?e$XUz+^3- z8ziYgDLT`1GuCVtqgSN9B%)&0H8exv)*nHpXw9K#F~1>4LH3X#;OB>E!Y9c}4Ar;^ zz(Czy}%FYktG!p5t+0|X(1^QlbW)WhzNzWsProp70K2zmfrh0kKdg0+|PY`&HI`8?>+PR z%s(^F*Y&;5y`AO0PKZ=x+V$P7UXMx^%eQyklQ!{ozaL^)@8~!!c9QLvV>dP%j^-H{ z`i@?4ax_1Dxxyv$WN z)5R|=9v(5u5C1+gnfKs3(HTzsp1fNm-E~!TDhj67xH~t@7_`18ePjPXe^uG<>^b5m 
zdk>DjKgTiDy+nI>lZ4G);U^Bd3bFom$=5{m#A7)&S@3%Cb|>z%wzuo?v$l&fKH|JS z-b*IhHm}8__maa%?yV#@`K!$qKZ7@?7A|@hDwCPw@aUp;K)Bk4Sd&LnhLX>7)hFze z7&P1%m?Xw$6)hcZp}qXwpGwGaTu@$|8H_k7PwM-F95VAEZ zJv29zQ}9WS#g9kNrFHVkLMpY-TS^!0%?alG^isd)RpZl6o&*6$Zp(#B2YEU!8qKh_ z*R9D-@{KuZdLl*3)ND?tyuhsYX+0(uiXySEx6>w1UKBHM@gsNX!2rwRpHgNT{sYhK z0&^|4>fhRsDwwXd%F%;uHA=u)(yC-pL^}@_@_$Is?frEz?^5FUU+(yNAX7OsL%5~wkWEP`=MPbTos<^0 zS(XvHT~aqSTDJ#ZxBV^4d}T^v&V!8jGj>0E*Y7!SYh-cDUB}iUi&=(uKKIVNcPZNA z%Fo)7$DF6;iu10Rv|`KX$8*<&r~8;CZU}WhC1oW)IVy2Q+J{2tkNdM$*iIRAyuSWy z$C9k-+1d(CX63w@f9x1JILH-x;FF7FN_d5x*Kl~+x15tgrtgwhSeE#x^WDh{8$8=s zG~=uV#`YX8|iP8vd)^{KbYzmfd4Y_NnDehrndIIU=u43zjf$1If>8o7#_;t zHdLdjNJLR5@xmR+A50y^b+0*szWdKaqVX?9^7p6i#bhMdh0XtY>i+xTEcrLX@hph# zHT_#HMhgz7B;!yV*a-*3X|WI+FD6Q(Xf7lIT`U`4D3+`6HhQ6 zc#U43`{4HNDbe|V^l4e{R@aG|E+LTo*vWNL$_m~bUMbyUwHEqkBIb1`Sg7f$3mpDA ze#=QJuDaU5Nu%kB(w7p97ap8^TvmBHZSkWoC4y|8`?YJ%h){YMsXWkDv@gZqAf3xi z|Ez#xMk}B4tYCrLM{jG!f9+YCXL#sM-A>g&!+<3opX-{xMVeRFyo&wQ-5+Q#JaAcF zI<|kbZ*=GA4aL#nb0fYsrheaE{)h-JN$30Y=2gYz@1w(?l-3Tt47l~Hl*#GzxxV-!GaK~WBz}M}|yW0H0d_5ABo>rK?c5S$B z@9@=(%}-*~-d#Kv-lt&w{!nCmj>CtP)ZIlq?>#lrmBTMNUcTwCSmtZwJiGpI1pjl( zkz1-&uC+cnJX2pE%opOiD^Yo*^Tw2iNu#>9yd}e(YmH}k41Y*S)OssaR?}YjJtEuD z-(B&m@5NEoeg4X-b?3ht_wg0$UVZ9brRnOFq*OKaz-Z8oE9p^67ZRGPJ%0UAL>hSv`@3lU-?nPuyj(+6Wu|v3-%l>-ls#w+h69*dEsGI@Y{veX@jE+6V)^3 z|NeF5{9D%3oYmHx<~Ndws`>eR6H{{YQQhKt)4y3AoTarJ#9YgPX}`hx`#zg zeHvoD)#Je4I}2M0> z=XsOP+jMGC=86`>$cw+QsjuQ)*0*bJQ{zX^{CsW8a-M?q5{2&ba%>$$vJ$kMz4q+4 zU2y8i{_{evIr<0nqy_}{p6a{MYdfdN^>p^ShgM#lvTD{-Vk`DF8L$@?*iJ87$-(!F z9;ZlVWXz-dZTNVGpV)GK`NyNTPj&Sg_$)M*`oW#_RmvsSyfXf7Z|Y zW>DUmdfl?nOt0JWbBcwySjk&??xHh((#4XGVg`~s+ZI``2~ZZRn#;AVa9fX{r)-BX z_x9Su5@*bhAD4W)?AeF6^XCi8AN8|R&Q4JI;n;rIW{uuunb_vw(}M~IPS+x|9XK>U?OriyO7HPfUerl6$T9PPG*<-ZY^p?!Z68V7X8}m$OdbG0jeltoQO+QwrIdX*a%XK4n z*~k<7eFUnit@<~J7F_44-gI}xaOM1cql)U8=awyqh|i!!_w?>uwahI^v8Fb;<=v36 z-27elehKzj6??2QExPrgu_REI%`(W*ExRuITZ+!bAp0TDO+6Vk9_Dc}9VV;9KJy2v 
zc4~76OgpTk#vW)ikfbnpzVfB_o257Gq&DV1+~^?WP*k=j{@1%LHj!PSaZ}4<(@nKc zvd5I~a9yFZ{k(`^x=!Kt+G>91R|dVwxx6=%-%R3pZA$%@3f=hvi^jNiLQr= z-WPrs54wj zRt5FdjqsdWzV(5k0awIxwhGUb?Y*nY zWiBUj9x+;TDI)&Df5g=`chr2pn`E!l>8~gtmwc?+cG1)QpCvk;`G^GhMX80=ZpnI_ zdAs?hc2s!hzJrs4Z>_(xH04^F`f!!1htRUSHJdY23(KQsBrN7E5w`PPG9y5}CAy()B_F<-(1fuBPGqd<|d@?~RJpb7p}Y2|DFj_woS zen|)jxHx6( zStITR!appYEIAMjCLU?B@r($jOpUtw!x2!vuo&fmr4NEa+%tzMJF~w2kqRiQK|^6S(pN-M zh^G-SMb&dq$QMv}S&aBV(?`TYyletfCY4>XT5dq2omt5S#Zst+q!4dtz!aU7g2D$X zG@28*)kq(A$U_PYREStqi1!*`Oz7s&olZhD8mAj$_#!cK1!xTMHUNw%{{8q=KG4GI zLtsor6&izVzS#}U>+HbQ$-{Z@NW$GlV1mGo05>J#-^e(IoW!3k0B4A27L5ivG37Y= zjsR>6+UeuA4K*An%vvmC3;uQ}Xdi-Ty9Km0aap+4`t~ye$H>T>oU46L>*t`xM1+RS(b63sKgZF>Z)y1C$AXESv?7`hk}yj!`@#rFh98&+ zL4TqgddgISi@%S5;5H;;B)jfwFPO%R{oo)1RhST@gbZ_AT*E!yhi38DuJPa zMA$ggT4{iDI?A%fd9;^6z~>Z6IZ(GvISj03vQNOt(1~FRgq{rKY5<>%u47aJkqjgO zpN#C2R01OzNQ7Ub$kEwTXtbM~;6dxjDP%H`6b1MBGc-XLS*rmjV^KC!AoOG)7d7}~ zq+FpAh-4rM_+*$BQ3;G>AQ4MW3*KK2G;~=sBCatZ=o-jX6<&kBPa7JvkOaI&b|sa- z&_E)B&lemJ1{#7a8c*&rA?O;&WgT8)-UBLuI2%X;UgLQ)mB7$IB7(PkXx0WAek>YO zTbK}Z4dl8KuMylvB@h}&0$xM4ol0P6AQ4LsTwl5fXsiJmLX@eLMst44grIAn7q(On zX2h*-DuVDpBd8)XnW6~mqaqk0XvQ;ck%BeA#F)h-x1WhYm>`!0coXRnDuQ8xMBq){ zu(4CR*q8~Du_*s)*J5B2!eS!8!Ned;kShbcNdzwy!7xE0@FrTnP!VG$NX9#Fzc2-0 z62@ZU#m~ebOpwz$-lSHXieQ)^5qOh*^Qee16C^{9t993EU}DK)a(zA%gD^qP$#@fG zIVyr-f<)j=x|FDhF%u-i_2EuV17Na>#bmY$6N4~8PN;a3RBbAPVS+^9O@7s(BF0RR zjGQ;yQdNLS0E@|9T_y%$f}B(FChv`?2!;s~fj2p8PDPBFAQ_7F+&=VC*n-8Rc`Xxz zFhNeMcoStCDuQ8xMBq)j?5Kz_6C~r$?vJWE$bK^0fr&wwAZJ#*Ns=cO!7xE0@Fpg{ zRK%DGl5ufpNrfpev0^zV;eJdE!UR3JQm%D{n5MO%U@C%Pf<)j=u7^_*V*Z<2U|iWoCNGFtWx9iuNF*RzS~!7gb8wT#ha{p zL`5)6kO;iV&!<$xm0lxCw1gdXS#vURc$SE414>^7g3WAXjBm$q0j+s=%SU!*p_+!%K zOfMorFyMS3vkHGEn50CDF-1b;13A&-^ARFTMKJP#MBwwGu1H0UdopMM~MBC6v6iCF^)ggq(%w<~Ts2sUm@Jm39(Xw0ImK(SwjcxE?tPoTW7mkZ6KPAY74~ z1bSL|YZ5u91OcL!^5X8SmBTR{6xc9*Y zeo3H<4go|whonFDMf$?!aY9Xa(1-Bw@d=9w$0H5U#&S0+$$KqQeh; zaPdMCW@vUWj9D#$tD~P-xuJT6(p^G 
zGmeJu3vj^)lGbL2qv87kTrGj5_1NQR_`U#_8X##jM}mf4cj3c>x9^j{5eATT-9_2V z53k~=JAJ?lT$E$FGZ7xN^Bvw9Pi(W9IP~O%pu}Do7z*zmBSV#UnMkC>u5=j6yqlav z(rCp1%Cff{{yFm&Uh2_vp3_}q6) z2uf@#oH%rc_e3HkHV48`cvBylQ!by0K#47WFcg2Hj!XzjY}13G)SgG*Lq;C+d?ykq zxE=*XC?-z5GBGN$L&N*s$XIp3>z3fA-2XO8AbT{tYmKBnWKdnGd)Q!8c;5&4>$+`3 e-$U;Lu`uBRA84V`u7H1*f!DX|gTVWuwEqDP|1lc? literal 0 HcmV?d00001 diff --git a/docs/sphinx_setup/_static/benchmarks_files/OV-2024.5-platform_list.pdf b/docs/sphinx_setup/_static/benchmarks_files/OV-2024.6-platform_list.pdf similarity index 51% rename from docs/sphinx_setup/_static/benchmarks_files/OV-2024.5-platform_list.pdf rename to docs/sphinx_setup/_static/benchmarks_files/OV-2024.6-platform_list.pdf index 9cb20570020cc8fb73ccafb23ee817dfeab1c4ff..0278be391339533ed1b1343335ce0fdb88be975b 100644 GIT binary patch delta 93497 zcmY)Vb95lj^9BlUY}>YN+qP{x*<@mEY-?k4V>{W{wy`lc$=%QQ_rB+z```0)RaaGa z&-B!(?io3T+kJ;i@CD^zN&Joj%EOwNt^&f6c%}!#%EHCM%+Afmo~!_Z^Z&9YCy*-; z5bk6HFk+z2D_6d>;c`{xV5sGYGgH3IjIOb0ISoxuG5fRpbHo$`KkDEvvU4fXLrN?f zJbv<|f1}A;N8jVE`0Zg~VbscKyY75OuX;+Q4QdT3722 zV|@JQos5UuclWoO%$nM5BBEEje$CHz$NBrtDq}!YOEO5AGvnq<7!Hr2>+SI6RB2R- z45+|#h!vV$W)-Ll5Dr4s)F9TITa^t)i#7QeAck18HqlosoGJ9Zd8y|8_`7xWScQev zy3{@Tlg$~5;cHkDsR_1M$3g-sTOtByXZ^a&pAskg=ksK*=7|vjkpTD3wD|N8xGu6TDs5E1m)sg;y%XB*7roMA zk#B~o;g|NS)dkVKPzu2QdFuhvkp{Wfu|&R*gi*J0+F{5Zo`Ip;7w>9aG0;&jdqJNNEu}Oli%=R0cbR9r#wh1tQ&F2Q zO8G9oxSm;lHezHWNNX$n7sv77_T$#LB5cGAwXNw-x-wH9vG|%vcZAX^lF)NApht_9 zjcX}>`I_+d{!GCtzUQ$4t?gnHY70DIvrb!y)y#FSnkT7ugNH)1qd{X+tcx(K`K~9~ zzE`?qP8|9?M!*W8?mC$I2z-C|gK9u}F5fY6?`|8=p?>DS&<19-55xRwdoatAAM}U& zIS{iEPbteANq&CWi+Ns=G}eN97fZtcOZ9=Ya+*wO@yPppXb@V!hpj!CodI}?>u@Qe z7$SOgR0_Wikl4e&{Y=?5pyWCjRUXo!7#mtJ7ELp5fLaf4=dF-n_=q^c5~APUPC-V2 z3W}1}K^mS4iah0Q)h@xT$plFxxBV?{{^6}Z!Go+K(-IImjl;GZwobF7kkCQ&c7TK@ z=;7{AzrM2_S9zaQ$%tg9bpo^oE=DU>q${uAmYH>8a>aXh0BRP~a7Ae9>UgQt{Jvu!wfPmIKY>aH>Bms4Ke@6(Cf9At|3_c!q2ys+R}aTtb(6c`0(w456tm+I>8o*sD?_E_BjtQJZbI z--E60F7(ill(LS9!7GF`b%G`2_YRqe-ZbA1Q=Qs{pQGj+toQcmV?jpj8oiel%+1KI 
zR&3wa(=EXwL<}p2B?1u!t(xUe^{W-ZclCN^&bH45s}+l~WWU`{Y3YggTocI0z9P3| zR)n4Ai7z}_a2b1g89TtED;eRankl{0H-b!|sGS!H8}En27Hq!#sH;Q0ivtnu?V$!K z!<_`JZAK70McieDs-2I4BjVOj$8=V!+O1F=zkOot<5*HMOa(@G8IWLdDk&DV^>FNH zTAX5xLrL30nj?JjoI*G?fon)#E}H-{-T{bmbxI%Q1_4>VK#%p}?;tD~QiP`VkrM=< zEUDuEP|yTEC<}(DCB!dPd~jwJp(Rj7SfzZer8dW(>yU1~-?Q;5077L|CPa=|9JRJ72$e-;K)pBVbn0$Iu{- zlC)aTFh6Hj(Qu9%SGeI;uVejl51GiDqa2bO!revE(QJ847r^ISJ;72DPh9^@6GE=l z<(78?pxW(2Q=fo;8>L(MoJ4>Y7(nyJ1vc*{vD#qH8^IH~zsX0=|45z_ z!|r@dQ?pmih%GZ1A7g2Wuogrs9)Q7ap-K=x&LocQRYeN6=hIg}Wd1b{DqM8Fxqj-L zKJZPDDDm`5=zOs8&U@NhL&>V?zi_o}1p1{r*+GokF+ zdzUA)CBRZwo_429eIX_B9b0|Tn>nVr$VDGJtrD*3m|Zi_aPoF zAL>*V(8V6*`RTlf`M@DYZzXD*_!%)9|Jale4&bhq+NPHYZ%_UjFRm&b^^6(=GeCOu zYY$=iXOF(6p4j$V&lkqEfBWgLL4Eg&lUK}py{^NzV~wY!&ucgQw*4PQe-FgU$XY2V z8q{Snx(;ODHTSzA^lIc1%BydPOH!X$$u+78)J?q;=4JhGk{U`rM%5kvBkz&HbUgV$ zoGg;}hHb~gqNtB}UU1>hs(MruIxfzrcat)Go6x=T%bNlZvY4w?-|5cv2LU;Z9hIfM zpi4QIyg0>GCC2bp?s&_)xTqLPir` z-?dOoF-i1_u6-mVn~jat96|1nWkx^}mJBlw zQ*0*E*)2PIVC={z^J5*xlB+9gD5kO-q;Sz3*xKXz_?Fc{c0ihiTe-$hOjBg@4W8=l zfFv%=EC*?#Pq)UV`3Ek+-JCMR$*l%QB<|B#tJ-a9l9daiiGTUL@}JG7=!qUwZL-ajn)>eh#BRfS7bXO4NoQu?O(@d7V&DiaStk zQL#T06!rAC1ud?j6iYONTskK?gC-Qlc761IwgkHr#5)3-B`7NzEM5-Qw1W=oA&NGa zQ&;{-R3mJZLW1RLmsHA)wtP;F^RL;*^9<{*)%h2Fo3F#QYCrt~D*1FRVGQc`X`@g%;otp;pyduz~-k5gg4t zESLX+w1a)MPilyf*RgucTtnrqTY)ALmaT3L$bHF8RtnE}g9z<|IUb^|O3FtKWT3u+ zKR9DiE~CFS6I|w~FbXRHCZdWBN)dtb5-CtT5JCjsSOtW_^`gWc7D$lQ&K`O^&B~N| z!cVD~1CxaO9sso2CW{R2em)9YP&8BSQsO!Mbhs# zAE@TP^Xd!cVhcpSh?b!`tPlG189aXZ0NtXq)!XSAYU9T&E1bTwP0XVvje50Y9i;cX zCE&*fBf@V^jX9BZa=$C~+4PE=xmlX3WCVS~#lqjM#(=+P0^bR&f%X*ETAv)4ROgF_ z$P|-gR{D1pgDrqyT`Y=qDF8#4hG`XMpZ2c0Ur~7GuZd}j$EqpIwPh} zUr`{TuI&ub)M;KYBpU97q9G>zA!ZQoE zX5jp}fdIz&p#Wx#Zl=H@%qa=4dOP z)3$|gai;M~GdV%H;AS&E&|aWBCAz%j77=_P0=D5$OksQoyPak?aIFM@@m~}=5^L#TW8S|@P2Xyop8d^%C?4m8DvUpWS zfj!wUwtOQt9mGR3_9SnO(1>h%O~hl9Xh~om(x6zHXvr_JPQ6q3Su=faY_;OW0A^H| zeZWLMNY$@jI$nIwL>?@)ojK`UuJFQf;djBkML3jc6}EU}lpev$~J!`g9U^Zc-QWh)5NuZ)Gi3`8e>&dwQMF<0%GQnxG6c`H?IN_+1#6&ja; 
zK;uoY!m$)uf!<{=dr0(}K>vx~!vGwpDGtNkFY3|OL8vxCh-Xq$W}Ef};OXR??eiF3 zYOMw=x$dzWci#c!+!J}((>0b?1h@{`HXG@dF?Kd|G~aOb0Pw#FBsTSKGw}Z=kR+4H zKtLQp3Fh+1RsIww_^ZZXs^2o2OP33<=SIs~goPiepRJ>>y z0WnUYd3)lfK^Xjm|509{bepjReJ|{}|5TF{fwo`f(Cm=Tmf@zNGs-E^Tj9&9iSQ+% zo5-GSj$};~ETJSNK7=LzBG^38n;?rZX8S+3VfqUL*+(Yf|Co~q#=W%8VLr}(9w&AU zxZ%Q`?c5DX)I?_zev3JW!|ypx8(NNr<`wcH31g3uZ5l)RSeE^B(hR(YUxSFZ@r)dQ zsK}n;++D4np_1yi&buLUjRficI zGWX;6f8z-L-t9siV%w+0CiZV7%BNS9Adxxzy$aVJFr!mP9oyj2yy4P#;q2&4PWL02 zzaUa(vT+b#Ega3=k|C*)k{!@SK)E@aIMB^OLD_hkvN5+Hz|;czn*8vYKmknH=^kzB zH8mtSIE|#WqRg8`oSVKN?3fSZ-9?$3zT8D^0Ypee0w^SWNDEaW|b&B&`FZ;2_t_izBZC^z6 zC5bH_T!sJ{$rZbVAC}yMlV3Vt0p*P%9#jVZ$F!KcnP+|v&<{;~1$G&mzZUbCI!Gv} z4Z0MPnTYx$2exV)Tf5mi7PbX)A&48AGOIzPjXArM2FKqas9_F&NdqauB$-2s6mtcH)P|VqwtK3)%rqwQ@?8*J`!ID9cmJ6y|5x1czJg) ztiyJcsujtnfynVn)uGMoaQ{!-a2G9EgsRJ#-tesZ=h*==$H*wFJ7lp^o~~RoiM-w( z&@f0p1BH=;UOlMpAhwr?67(N3qj%85n-LBhHIU?}LZiPSR%J6bw_QP~GPt0i4Z24` z{#t(z9L#-Q)#`ZFzIM1`jvo5qdZF%!U*x!I!{x0;53Fu$I*C=y;8dW6F%LGgnLXXD zQ?Dux!JR~OlRZIahR+vFsGim+nN;%UBXO z$>M)`1Z+o|1xHO)Y^qGpCV%!$~m+UYTg>83lQ?lrq9Xse3nGF_wvQ2 z&+o#d0ZCEnmjOu9YhqWE6TX8xaA-EK8tSI(!>tzhLL*ugr^N84O1B!aNt}5yWq>HE zWn3+6Bu3q6OMg1x#wWQ2GR)#G$Sz*L2vH-KLy?~~@oA4DM&ARWUz^tsyd1Kei=5J*>jN$@;U;dK1G%^HO@ohu83S>gEhcQWugSV@4 z^-Cga6?BIU>DEbVhb+<8eNw&-4Vb2H}6nyItipXlcco3Dx5_r z=$|SN3HX%0FzvkLypN^)52)@Q((dQNnWbA11_Y?NZwhF}Y@kdxu!@f{59F~Q;D$85 z^6+Sww53mO&F}C7Di7Q^lx|VeywGeOiTuZ?c^=S~zWfL)2`qva#AAzOwgA`$vJZ%^ z?1>7vz|L+zh;DSA@!+Al;C{p4LcxctYwsi~m!)xY8l9P3Vx;zw{sbrDJuUW3tJa;hTi zqfLq3WQE={$%AUqo6d}1z+ts5(taB}Owy zBRleih)_j`-U;=L6XjN%zo=2YC!W8ktc<7OJ;sVC)goECxpNeDLiK?k2U9z0Jd4s# zJ0`A(YO)@Avi!gW`3%Jxe~9(i8(rMj77ngK-0TW2@$Xym0I-wMfIH`UHK=$7jo>Bs zX;vQI@ve~G@VQA)0bV6FOl(N%WTz74(~k7L6ugK?0__S@hdxKNfh-(N@OLf z2C8HcF~TrX5uh&{6?r!whd~cRrjqchmbe6V4d0!ik~~ISB*vt*tIJHb6hu%d{4P0Ixkfe`Xq0^J9!~XINJ< zpBc3%_QvHWdeo#^YRbP+QJlozW*6@w>7L5U%9NJ~CvBFa5Z)QV^*@v0@I)9;fXaP0 zh`x8Wp(s_t9(Q>}a)vMMVx;COE}hOwVn#K;*rCWkD+YTAq80HUkXClA&>HQYseq)i 
z2jUvrm9Sw{AN!Zsf^YcyWfi}AFwIm0&tg(DtSd~N-F1Fl$twN}2i?V>wja!jIa62p zA3G#l>cf@xfp)ChhHO(a&!$LFAurqh%4o&vM4p3I#^be0UF3|PqRg4tFl}Aw?xW>hS{^I<}P>=|5KchX{^8% zPh9j=N0khA&BoUC41M3Xm&lQqc30TX=kt9JS_Nf|DInx>$2Q=P!PslUAv2CU%{F~Cz#m?ztOZR!)zcT-GH~%P@ z$wP``f?0vONEHJ@#`hb42vsSi=GbM zFt3+KuQ9|b8|d5>*-eKmy!%>8PNP=`(MrCc#{mos_X@Is>CY)eU^Sbc-@qwqJ&{5d zw(Ig2&04S(TS-}2lTU^qgteRSXK}FHALw+?BtD+-3v%CIqYqud``<4%kMfq|0(^dhMyTAJ<2QM@rvO8lH{G{b|Oz$npoH-2% z$N?;0|4;+4oFk@P9(kzW@vY9{d)a?K4Iu1V>8j`PYNxQZ-Ockmp#OONeMW-(D*9ox z#~>uw_r=PG8&6AQFbv&GN7f9J!AB;E-e$Cj!*~H#AE!0qGIHU5ow}@T;s0q zZiaji8Wk5ub@}Z?DqWz07MX&i^|;MLSPn1>i*%_UI(q}->-q$LeY8V_kI{+QIy&-z zPlq)Zm0S1FkH9dGPw4>JN36Xxj!Ob}EG@!C9-xgCCfA1??t{+I>v`X?zb@VFyKI$? zViKbFBN8p>s0c&*E?}}V+;;>SddVXDS&1V-ZG-l6F{^nVxyf^={9UwsFbv+g*%dIk znT0X=JJ4U@Lmmp$XB@Q8GSByq(#AHz`3WHod3`b?FgvFp6+!RgKuITWx4T216M zNDM=Sg5Vh#=ZHZr=AP8*9Rw4WyzU2u3{RY!D|2%A(F8cRDch2bMwvU`8M)_`O;3g- zA^ijs2%eL8Ll#=NL5V|u#rUJqqO6>vxGv8um?lR?Iaarw<-{eX z25ZYUC_=mMIt=)-xjSZFm>jwJmE7B5XkzeD`$zS)Yz8!5ctb^j9uPRR0Bb zLo`_j@JjHMb6fa|37aEV+ZxU2x1&|%K^f{slrm04xCxY;+CL9w)}~+%q;1W)E-KZm z&w@StRR9Uq@72gn*jSH~EfTI!c)gI-iMpZi-|z%M_W_R7WE3@QX8G=~Amb=2AlL-j z4dKc~cgEok+5x}xg%w4o9BkyU*7zA`{-V<)bf0&&S9Jv^R&K#+4}{kkWBiVhj4d>1 za#976q^`rjrYyQ13A}Q}*g>we8;+rYzFIRdrUcMe@M~O}4jGBnrrm;}w(1|o5e~7g z)o15@_`2QvC+hmAh&|iq%&jl_&eU3BWQuvCIb1 z-{Z=B2wQOMbK)e9uc(#7grCXu^STe9>0>*p2N!=Acdc5MVx(1FSKZ!O{9yf4nHSjx zO$hiY8;&fN5Dd$= zENo-D3d223)!Yo+GdiC44<2jiU12zHTj6&^7P01oXw4ZGF;bcQJvZLV>D=5-w3>q%(s=&zwAI7BD-*6@TYC7}wdB&7umYLMcyta#td!Z@)7 z3^)de-6;ePAD`eTa(8w6hY2{R?D7|-MPdqp2TPog4$xR-4Kj8L_G_D1;|rj(L%X!L zBT->;F>Y3sOxgjV_00)XhPoW3I5kP{l`DwceF1u2gxWK}i+H@wuinzDz)0LZylU}d zwcp8)nBwd7glagz_}itBJt>%CwWRJQ7Nq%^z~|;@1Cn1d77kss#@9PZ&`xRdv20xc zIC~;x0W2AUAWmdfQh|CH=?NgqAu7yi9R?>zr$8%yX~b?JQUN0&yalEZZOz)ILIS6t zqiidxqM66Oh73A)=*&u2Ga0Ok7VW4f#o~bsU%8k9Q9_9>!!>kRp1%vGw%LIQ>%QkJ z@}&0G0QWsp%YXMNptlAZKv6r7`U7PPE?*Irc3PpQd&Bi@8q>D1U=jG)Es+ioD1M*T zOpi`7E*Rs{k1nSkh(NTcpoW2rCWa2ir&SeCe9{k7RY8SEKf62IWg()8{jE(c8yrNc 
zAxO(pDh<9VMvYFQ5gb&+Sg=7Gog@4X zCn3YEYz~%YjIrQsR4M;^@VF5Bb~t|i>63Ce7Qg>W;=)lf<8zs(w88E4Ldx}XR^QO5 zYtWNBGq^v4b8hEYxd})8*>=#-45zY6L`)B2I{vg>KN|mRNYKM=Y6|2N;1e|Nl%i3z zJdrA_uZP6K7hfRL%y!ua-Wlv#S!891ce=&r`ggRdKo2X<-5ks;d! z!gHL=U@n`=@bJ&G!_Ko%0e_FNr@YPTbS$eM9DLp_uSXC_pmn&RvUPv~M(>&}rAA*B z?Owa;VI@7A!8V5h)M)H=9RmjxT%#RPPVnmtxf?&Xr!PycH07=mD{$b_g%aGFm)ip+ z>xm78aCR86wwG6r)go|UT6`N_0!{z9?u)4;RQ(znfb!o$`c6fI7QCe=0Sya56mXSp7J^y>^^7N?dwuuXZxSNjnirmXJ=G5WgOwM@` z|F3p1YiDrdyc?b;7+?XDlFmPdKjAQNX#Z?6no22Mhg)K`BHa85GXOIPe{fCE_49^4 z_e)#YgHKeW=twcPp{JkvB5~QO?|sLT(eGRd?H3>E;td!apQLwJm%QO1k#`W|cce(y zCpvt8VJ4>c_h%w_!#Jdyw2=lV>x~TywiPxwB7UKM>3$^g9N_H12~P5j0LMmmGVHTR z^4Bx24Hq*jG8qARczsKvGjOhXr-=%-@Lhj5fn3B9w1_%#$i7_Cm=LZI@!2BKi|!er zr$rRI_s{MuKiYuMTK+-9|1zaYJd5Y618(Gh=O0{seVv!$f$?C)XVdo2#~rPXkx%as zShM0fC!jTeNn8@p1gD=0XB}MQ76cRBy0@o)bBM-xv(X+{>R#36)Y?Pluv-(tG&*s8 zt?>FcUb2dN_(3K*ci-Vnjh9c!Z9cw%vx9!Ci~V4Qg${-k5Ms}UV?_V%+wo%J>8~ML z^xA_61IFCUCsO3oeV48g4%LZ9o;B8TvhHYvfrAf}$L`ls^g||Mn9@%EAn^?Lfj_YC z-liY37OXY93b_iuVn)T!J@P!O!+UFn7UL=RU&$IrnEkCMuV*h1atNjO)&1%_gbJjH};d>u4rsbN&nv=qmVb&-3Fs5nlOK~V!w(plWPBYM+K^#c(@=CPwCjGq zxxzMPsj~3q`Cj?cZkVwi{gg2o+8Xke5tIJgz(evG%_P0T2FA7MO#qWerg{SIcY+?! 
z%9eWsy1y<~Bmd(t!_{==ZG&5<33zCvRCo)xpSy&sP#hO#{{2xcWh$9fkv4uW792asO?@eVx3$0u@b6F3!Ys zA<2x^R*su}YjMxm7(!s=zWTOdbD;!?AY`xj*Q6e%@@yX?#uk>UufhLkc5^qKp|v@V znB|ev^1$tTRy{8h$$W}(M0x1ZNGyw~Ow-B==k2$TGR|9)+}De5EgqYzEW!(JEqY+{ zYdwgw1JXZxzY*ttv>enHk;AjV#vTS!euR9=Ec*t`$CHY8qEV*~6ci~Ru2naPBilojbR4Ry)Ur^jfz# z>mKwiutIf>S5nW6Rz|8BQ+8f`EtF%?6 z`Bsam5#Ei6E~EXYzFk^zCW25Ql0hIvgXnn0y<;N0Tk!sS(x^2*B&CD2)M!2B-lt;F zY(Sqj5H|I1Xl&2Hfp%JSp7bgd(Gl{abv*@I=3Xz^q;OKXGwG~8EsC%_ZY<&T}A;8>x5l1ncSTZ^Gk_c2sm)wEty4_pOz zOrcYV)IvY^WO1sYD|A(j$>pfAN#y=Z9}d#x#M|H`PND(ce_!$++4ykLH&MO6F+Bcd z3wW)W21%K>2~xoypUpN6 zEIE99Fs>4`rvVrD+$c|v=GQ;Tv8iprI%*2WbYh<6Rj9D3T_LwLmCX;((`e5Bb>#FU zgA*JWUK8n)`WKJ%=%-o`bk!L@aq>ShTq-J(7&$fjziz0gSV;Mrp)fK>(~80RDp~Q4 z7(U5c|1Rwao4%s4b&KjGQDY?I$)YCb)2bk{5wo%pGZU*aNwO2OawNyA8i2C0CSR+{ zC37nwCXdO&r`tz@-~#N-9IXG}oMXvg!*#O_#a}D+6GW+EPZM&4z<8o0n*Gkf>q^6% zXN^1+s)l|;c06e%>G_G{HTYx1J!CgBhY|vjU)5)_B$Gs%Q;7>CNAf2~6BN|t-XPQr z*347p>Y9n)JKWR3QyMBiIAzJfG>a32R=5p7)OLS)aQ3t;)MJXZp@)>$<$ig!^n|HT zPY|MnGJi7KsuFH8!XRNiTD&(c4&&6bzWuiGgz5M2y`S(hpi9ut?YryLQ=SsgvD?xY zu`F6Lb0k*sfFD;<>EkS)o#iK&H^G}VsWBCMug{Rrz|mTJf7(r^{BN6&TZtp+UfTwe z@BrGhQ6zRC$wb#~Y z@QwRnK8@pVSoy!h0ajSvb4x~*U-duhT3ZPT5bTOLi=(PJ&g&~>=Z(K!aES@X#w32c zc4!0(1d%Dv{Wi4dJ4XmwLcRWFX50@<%Wette|2Yd;%rE5*3H#ZsboGe7fEp_cOu<9 zGa9-L6Kav9b=MuMPESu)wdZ~4+BAjtSgI|-JHow9uB4Y< zJU;fc2o1o23Rglma{-Lx!7dudT{v?L-dPXK}yum)I!x z4zT_@^JJu?Od8u(3%yMnt1>c#L`_Xkpk|OjCC(e+h@U|44tRkivZl>%*6rxpI^Jvt zDgYR}gsN1m7mwq7)|{8;m#wm(yZD^4CO-CJQxePi2U8*Tv~JYYS{Oj#>0r`lP;}^E%s}UlJ$utdg zLFMRpu&#z-r&LoQJ+bz-E62VT$Zlo`Qq1rm-d+u(J zVXwiDvF=l$S>c@pu%V`Z_uuX9vA_~$T>Geb6u)t<=18zNvFWSU6}Z^AvwAikn}M%A zyFSex-_9%W31y+$WoGHh!KZ((uvJxMR9gt`>;9{{#GV4oPAbk=h3>e-UhryHr zu3UN4?s$PR_DB4Ql;sJ@nZ5Wzjz{K+W=;{8gD+}bHLYTKJFESaJ|c0u^G4$5wWPwx z{%nR3_J8|wPrT`r70$Kjmk#1dNHZdLm!ALfnU%nga}6midu=99*EJc;@%D&tUlTND z1j#u8ML}ZfeWJ$p-2w2MAfM10_nFIhslmm1`_Xg-`-5I=clc4osO z8coPy@Scc5V6kh{Q<^V=GEItMZ-5wC@U-lBA@k194C}w0ihuyU9Yt3J|PP zD*`&V)G^h4#*pIvDpe1x~cf^^r3wt1<|G 
zDBcPNWX}Snzq6@6i$-=K^*V~;D|-_QcLdOsulmZm3eA&ZswR5CpH+#GzIc@!DAB(I z21A%RLIfKSsh)KKMX>inHD!I^Mso*6c7V8qTCHemtAIR5H5%2;;d>idN~i@oqKQ4$ zQFHx|D1{L}Mo80^bgfnC)q7bEnOZ8A!AH^!Tj(wHqU6~Wgn=f0W$zuWN}IW>m@DAV z>SKLt>JN&Ox`tf7mxZ|^?EQ&dN_cjNR@@2FXYUQ>>j+V5s!)v^D=%g@lo1xlo`E76 zefAp$FSA5+KFnTuH}mW|cSWz>MZ(R$Z72l>$q<=`18T~V4vhA27WYTDX_CQ%=u6&| zr5cQ$?p>lSD^@X5)(x}Xm^$b8F<3x&%t;oLkTHL~FeOx$vrVDvG)>HLK%DCjy%fD_ z>h!s2R;D(^$G1UdtAnY6?4rSX?&Kc`cerj(>(x<2s+|IgqwUJ^W%Byr8gCgqJfNQK z38gyU`V`mO?jtOutoN{<7JdMx@Vup7Cg~rn3cpH>=C;OOkMdBxGF5fy4$A@3fB%T+ z4h5h^SyznxaAH>XI(8crRg&qJ2ZIA6_&Lh1O*xlq^0!w8-uO%O-S0)CjEgk_4$(D` zrWb4Zl~P&P{BE#(rWMAOo!hVF3*Jq~%r1(^-X+5=SHJ_zefN4~7^zL&Q58Ro#;m{DTa|VYLHHRuF&W>3U*0R8fvoC+momH zr{Y2-yrt$@%y@;J=n-XtxaBbbEP;9^O~m%%<3y~I#t1{RQT-3sx2T8r#$WpAzdP+1 zZ;o-bY!wt8rUS_czLRjD2{4-BiBKhX{tggv;){blAmLqf@t%AFo{{E;C5^Jq*same zn9kEYitI$;!zy>%u)e3DXe`F!f~cu+R)UfwiDJm#v^5jgi5@Fpa}z9Ug(N5y;!bnt z?7ZM}FxL;*^LCfwhSoE$w3ZDIRY1&i2WOj=4pw6B_@9E;@3Ss8c`Bub|Jpwft>WhO z&li)ou0n1=1ziyY;)DYp2g{m!{^A16skD6QE<@S<(%#j`KbJW=$QPIcFwi#Bg(Z$m zAx^Z}uqksTdiN|4`AE9vaY@)qLE?@3)FQW3GYQNSRNu1$)uhVB4)%C zaT?*LHn28*ooJmM_+}#!mN8BA0$|Q;~TWN$4xMh`qsUgFY9NfxrwfMDjjTn)PxMr2m zde2nrNA|h9FKsUJq#%-&y{oR8EB%7*ByQ_Rr@{qR=V|&i@MBP7!*;IHLJe!WQ-)to zsI!VJonW{r`+T-L3KpYT)^z@6=y% z$F?2t?`&9?fbn&na zB(iQBYWIpsXr%;|mP3Ze+bN-JNy{AFgZe0-fbG&w^KJaIa~7j-f&6;;M<2YFJXDz6 zWXHxI(Un`b4IG*$4lv~}3q4e?4UONsl@6odv}lz{jI%yppOz;>F_gGha%S7Y>d-(x z#D+DxH*37)mO#-a9S=)_^Z4oAJd9FEi{M`vL_Ed+HBi&r4NIx~sORuN`p7x8t9pN84Um@VSEPrCZ)fe&%@Qe#Ry&k6{u4W{*7?BfA7VbGwYY zU|I+PWV9F$;uImz;xweTHanW?w5L(Bki95N*xw}<=X+|()o0OC;qyd}Ox(*l{xJU2 zHOX}MSFbgg@i(Ijs`+!g<+J&Bz6F37SDy+{ylJktKdQOXg4la%GvK*xqZ;X#E+(T` zx7CQ&X!C6#E2TLd7x@{HLu~AQ))AZ)oJYL1HRmPIRN}JpC$6BhfJ)k<6^{TdwA1vF zei8ivJnEYA2U+Ewpun~lcu8?wxPYS>Ub6&VUvXrv9UzpLJ!US8=yFIc@BYFLgp>E}rp@_8A7F^@B$l&r z4j^}*c}i6jP(fpvWaE>L~uTaa$IgSFrsxT~AB>mbz>7Nd&HODQNm%*`QsHIGImY1%y30R+bu+hdG%^ z9yeJ{fh_q!wh5h^`@c%Ue`SJyH(#AhZ4qGqRTKzh36t04MA13_S7lIl(r~mfb27Ie 
zX5r-KO8z5{3#&)W%JrYjfH+x50V~;7-i+jbc9FY2&4~?8bxC5trw)A+<6b=pii1RotjpB6!;F{pEJg<`ycZ-^|cYDL*AEBf@A~ zUJR%Rw%diLv|96pNZlXn8mf9G{;^Qo`3_G0&wef~-7U|D?N}!(8+yd2Dctcy4FejI z;gmqYgm+=;;`WGA_uI$Qa_fd(mA>z7G-R!HiZbsQWUaPFqvN%ggaMvTd$0=6U+IhB z{o94i)eQh}yFEXAki_RVP$2oSqjKSWr8gT(xwPc91fGbk%Ky`{b|EfsUr=^KD43ThT+PJe6eckzdBhN{!^#NO0vTWNE; zcLMI#zOl-SQOR-lqpB5+2&pIYuNeN9jo-zs3zxG1sPPBqL@4EU0Urf=opt%scy=%ql6%UAaKGD^9iwAG0+-cU2BdL%Xix7JT{VqqX( zv3bo0uH4ZOtg>j0Xb%S|@9>!kD&Z|LfwrvJE=~BuwP;`(gSaCA3J72f*>ys*@aoBD zkD35E_rZ$Oawa5F2soADt-W)BL%MxDx#3Bl;e3_vio-M3kGIG!DA5v&ahmSqPVZIp z{D||83(0#M03kGN&YT*Mp0;v*c>x?7FE$ZXv7~sDS9B;^PvWRLNzOTVcjf{Gvl1Un za-&>~V^MB!-mtXi8Jda}NLvpMkX|rHVl88{10+Hd(f8=S65E1FDdNJq4mh|JeZrO$ ztqKO_J}n!o)bY>8UeigOo1u4u=n55ceeGr{d0U}qaEYg9^IwLp9#efu<_(CVf!cX~m|Nn;qIArgjfqQpu)FMkr!Y}SU@yb+b3weFI?AKsOW(Mrft zApL5_&8%F?oDYs1ZVs(X&G@Wm1F3sA0R?D`xX&iW9hWJsK0Co|4S@4za8nf~5PP@0 z=Ht`lX5>oH+G^&5%w<)T^z4~1rWwUVl%DTvgjL#O26;md9N-z4Qv7`s((Yu65QClL zSLlZLX7T~KJ9~4>j)wv+;YrXpCl(TsLc*;?j*4#Ygcm z6nk&VxxZz!ExbjKb_1bh|Jo{Aoj?xUiNc;`cjAl#9DeF1Vmlj|%jsbp{KhDFz@k zy1L;~R>K-q55y19Z`N#)F=$ZR%A<+mjD^Hd+bTlbZLfy+fJ4I43JVQ1>76dSn&u+5 zP5s|ihA!QRqENi|I`rz0$jR{2;yK;MFo#$|pE)P}TrZ@G-^^Up`{DE|$!{_KMk#)M zhvt;&(U+hS@}XWOcVacMQu6%=j#x~rwEU=7+4X$r6n1^hYRsLtssx7d>RcN;&;c8w zo*r*M4)8SKn2d0BKXS{OB}GQvUfot_li?mqpxxkb)a@iwu`o=ZQ86N8mV89r?`dj^ zr7$yow7AU<2`xf?0{@i}@AuOAw!QVpVFQif|3lhaM#a%JYoNgi8r*{gcX!u72=4Cg zu8jl<1P^Y(-QC^Y-Q9x|^bUDHIp?1H@2YR}V9LdRmy8)sjuDqnh@( z>#nMpzDcgUgcL)r_mqUvm+tH7?j^eyjMcDqURGeXpFuZA>M6Q=75Aa70DNC9i%lB^ zLw3776F2JJwZa%ExJw5zhKTuDub7=PTANd3_m4(Otd?3*Xxh;Q&09_0agVm~9`vSh zSi>d9yL*L-*d`;7&O=jjmT78|S=I(=jOtmp_EIfH4z)%RW;6AG-O$0FuIzLT`0N_sA!7Bh)I0zjyES+VM|CSCes zz~CZckA^Oc4eKZ>NNI52y*wb)`)?hYQYxgYf-KGE){Ev58)3t>Vc* zQ#63W!%Z;g{H3E&+vWY1y$1K^2m^!B=PKl1+A&FqA&%WAwuRcVml5oUeFAIHB|)-K zN0y|uBQY!8J;{!;ctD8~H|)J7!AqcJo4`4@2l`=7#5gLO&DzG)%I9?O5N~`Y*{vY7 zTLi6CL%MLKZCvLPtf@B4qRngP7d0z7x3Clq`7d4q^2#M2x^t{*kST+A1j>jNUo;H{ zIoU$nF1NU1LWW}oKGDO4E^^rW|E!yh+(w|DYmy)t6KG4{i~{1WIz1pIj#;_0uyey{ 
z$j5^UViAUO(pgUWMbmcd$Zt}+U)#0(XHamjHz7Fca01)saSDg$rjz|(${?$4oLXhV z>>Y{OXp?!@Q#okkA&d`WmMad-3p-hbBcxY(n|b7%k%iLg)y`z1<|ugm$sWAyAkDeD zN!1KSV7L`56#%%xiGgI$Rabv0^I&PqqP3Qd%2}l|D?uXoD`RAl)Oa3s2g}GY9sjJd zaY|uu!tGU&#ch7`eG@a$;bzTO1)HOfVnSZ?BQd^#rfgxsN*GW-WzH`Z`AwH(A1<8c z>)JXKuh&@rBz_(r%9o{b`{cC?{`ed84e#J-u(%lT?}fk=Y6$4%dz*BwiBL5>wYEf@ zU%r%r9h0)U@R~@WL%XTdStKiW5;a*MzG5!ffRDp{@=u`1#;E#tb(FKlosygagZczv zeq#eU8!?tW(xWsH5S13sMuon(Ck5v5{?95geop%K*1)7SggADVBsDjEFTOQ@p8ebe z^Elq%X$~OAwx(%7`rLw8ilQzmRdQhB?-{v!#}}~r3$e7)t!eb*3gTcnLhu|FEoX0Z z^9HTUg#|~*cXAKRye)S_wQzE&<&EAX`b{rY&(}o07DSn!ygkMzt|A|s`KmLiA_@;q z2$3OEOhiAPY)QNUE)cLlhTxfaGD6sQp;-j*N{^bBha~x0{Pu|c=|qGDx~vn|y-MD< z%y}%3rT1?hr zwPJb#3wqOaQrY*%dWnTu8?RvF%qB)*=ld<*u|9rjfefiBMzyV|P%SD9ER*7s^fPhF zlW~AN$dj{qA2=?Vn=ghwJ(zw(k_|mL@okJ$@u@xKhXpYKks=nehc`28|J4Bc`Fh6& z$@fKMDEfvWD(-1<%)A>~g1^Q18{?tIZTrfY2TNCHrEl`_f>i~XNt}$tVc>kE281OLbI`VKbdTg3Z>TSnx!qgnAUn9;3=n`pUduO zCWoia_D%*_$dPzpGD=^hl)9ti%g|EpzzBF%gts)z#WGie2`K~<>lyMooAtaHAM1k+q5)-0CWOq-n8xuMWppPw6M+)4=W(Q0}E z7i6rn4fmZ``vpUAiwGIARc8(}lMpKb$7_jMM7P8ekMT0zF z!lU6QfH=9{Add2sN=JID8kRDh~srOLMLb=Efv{59idNw<;TG8v*-;Iy6UQ?9klQ?4Udjrwb2m9X_*ssR z=nGk{Bf{379%p`0&$iu=Vhp6$YZ1A-&i_HRdWD%Gti}cXLs)&J#|H+^!5B+rKmo`o z_LtFd2nO(T)}6pQpUfyM?Va;fV9NUXsvm$erNTy2o?jw#iPVzKWH2>|=K`BwKl&-$dpXAI;8{keqJ z;%SU@-4&z8=6j~cv{9cU7Z=l;j$IZo)g=e8_m}V79O7=2ZJFA|m1u(Qk-m4LWfnme zMQdSN7Le(CKq|3%e{cI~nO+Qe=tHi24y|9xPgGK{bk~p{mcRMb0#@WhUZCfj;(#`X zLN)oP>pk#-)v7*Mk929-)JF~@+2^L5vBBrNMo&Oksfx3(mhyh_TY0JdH&yuY_3xGI z%pdpy>gjC2>wUy+m&aCk6QWCYxSlc+_uo4z+cC<1{M^Y`nq)>+!4Bnk;A&cl)vh_85FUhW-(dYN_MMlkD}eyK|agvL0C+Rd=lT z^!O#;sEBm6quh&r6{h^L{G$530A+)cwFqGzo8dH(x1`8n4jA00#wRAV9TVLmsA|%0 zsR*50YaMdaWgmnSy77^3q=|g|9Vck zY<@Hae6ZF_W-4U&ixB32uJ>bs+stjkArGP081^s|uop0w;w}|3H=WKa>=t~D)8{CX z&t18peuATxImYq3L8uAu>bh)^6d!7OX-dwJe=ice&)P!4zgv-@{(Uwa9$luJs?=f$ zETX&cqka2(agEBF8BM!h4{QOS!JjAILuW;*u_VUm+&*$)Ojt&?U5gI0J&c84K8=xE z$s4O3^*0CAB&Q@x&BMs=5dj4sZr-O1XjdM3H8%8+CI}#rhPSiC+8I*-w`DL>tXWb; 
z;Of5)msE^U^VzxS;E?#0_ud<=&H9f>?HTtQX*jt%jB|yujoJ|nUeTdQG|!3fyT5Xw zGth9`DZ|ClX?=4Dv}}4}dEw&Vw@DTuClk^XgE!?|VtLB5$46Sv z+>qaBZY^&v9qb)fA=y6zR&<@>^92&pBI$TDL@-ZbiiAu1ncs4K-^H0v8vjBsM9Jt? zM@#v1T&wK;JCV*x%0Pr(*vwgP;-h;EWiGV;#FTl>Zq(i&lY4MV@y7;i;WLtXy>XE- z%ONyo8TZo)@r**4WAbhO*44s`pBj8VO)SWp6Hxt{B|nOI<2lI^=%&)rFUa}L*DMPk zH`vybWs}*B$8M%mY&#Qv?vadcrFfeYctkCl!UnH}<4@uFO6R zRj*gurGU@A6ZXI!_%M~zJl#Pi(@STK=2e;wujI>aM@545M~R+#bo`wbhV}E0yLg;+_JV=uy+n1NkTdg{9&rngQB9Ll5jAruhY-u%ui09o-*j;6=9zhb zOer$eH~BW0Z64Z8uI!TY@SgN=a0YxlM$SaCT5aJ3aV#HTe_iMlpW^3ZLXYaBN<<}c zIj1_?$?)%zxfz1X@~SgD_g#CYo>c5~=4@hgo;JdrhtJ#1d5cDryBroPxwkU$5Z^QA^)2(tzc{FU2C2hNG`dkhoI zc?O5)X1}=h zdMHVtTs0j>V7OT6Wu&}{+ow*|`suqPYD(w0ACO_)=COa*XZgu%t$ZH)Y0YEQfBa%m z>nTYmZ2$T}YF;e6qhqnu_4?DK86)&^SG1O%lT($E4^AsB#5&GloJEo*&urj1LQS_0 zduq}H)S;h?M?P3-G%-K)kIxWv<6Y49M)cU6$#Xji^EA2`>zxNIO|TI3pYhL2ej}!_ z!2<{$@D#rSSaA^^EXP#KoEGpI515=d94W0c(~tx>?ujGe*VWBTCoHiA)18)ud18G; zGA^^WF_+Y@@t3HvwCm@JKQ-CNVB6WHaK=n~IJ0UcDN+1nWheh zHoO2m;)P#O2jr7h86)_>(2aX@M|?r32PkT)8S~OFi*<54I({}_hCdV)US?ku{mGit zck-#xsWrkygO(jXRL1Fy=$L7e?ME4_kC2>?__Js)Odxl5AViP`U1PE(VP=YiJCT*m z{i4iWWa_rFb(qSXsL6=>1LS3Agu(C@>&GugRP+&l9cXgLyvZcQCo}uxX+3@-Wh4Tf z?vtEOuAXVj&62FnsPk!f3b6KWN(1)OXsrqTlTA6?fUz!#aPOx3v|gdDZ1L7Fq?NKy zNV1fyaP9r;_e{vuG&75xcp>inRtMSBzL=&;`wX#v{3rn13HR{iq%(MEg-pYzY-QZ{ zeOs;*V`t!1s8=C`&e#Fa8tIQt_(`$rLOk$)=9%dB0tbF=Gh&7x2byh@4ty~pMX>wi z>NE2${!4b(#-ZIqNt}3a=XCUWY|l|5rlTi85_6IV0S*Ufh)6_3tLv@~!9z4F?TVZj z(_me))CPD73^fn4nDl--A?WRXkpQ=X!s+HdZ2 z284S*_zS%f19LrxAs7fwsq{MVZ9*j-BR$x}7*lZ!!U=+-J@nAAWijQ525LXRE|CW) zYsl#u%!pbBBBa7jPhX6t++1M|yCj+Varo5`00$G#*w{~su0rNUb-^FI{pojpg#i^C z*U{=af)@B9o>aOusB~Wz{cQs{e z&^-Gi1ooP*Ea(i^z|oELRfb^+G2yatwuX(V5YgF-N%t9DI~ioKcZ z8r@lOpp7qEJJlR}%I1N^f<6}n`Jkl^?hCJReYU~W=Sf?~fio(3Mw2? 
zT&)BnmMffE48AGN*qJ$Bo?egXjweApmTOhLfx@vJt&`;fo5hnkpJzFFL}=sZx}oPc zVl1$V5S6{_GQ1u)TrEp|USg9x_mPP<3EA;&5STCWYE`zV6hi=eL(VTeO zH3!;zFM6G+BZorRUqR^Om%Zx^Gjd0uqD5wpwM!-pSL?l9APoLQ^N4vvtf}2$!W=zH z#X#oZO8YDSg)i{dw|hb*|DHO{M09J z=6nh+)}%+r$>xzmF(jG2mnM+wYpH8`N5B}wZymTDTv<0ZiD4)P`TDD|VG;6V^p(NE zjQI{Lfh(4))g{UBi~%*0hLYCv5EQmSq#6X;&>DRdXcE7n_=s^B+q|MhV&Rv=Mv}CU zv+b&V_hef=#8-Div17ks##hasVa)e{2COlxc_~_}3N^<@r?!ECL5#az5CL14N3p@9 zZ|i$)<5}RJvZ)$up&pj(i3y9Ejo9x8Mq7Auzu4BR&4S>;fb(GmwHY(3$YO(%IX(NE z8r)^WRrTec)L!lcpa`ip13Anvi2)lV>Ap?)5|G0Ux0%?nhm1?3YrfvyIZ4wTbX_wc&J z$21yG`Wq&h!piaxbyrDCQ1v@DIR@G7B8IIQe>J@L0^bxs^Sd`yyf+hS)w{B40F_p< zvCLS(kgPk;z2XdtvD#xEKQXGW#qwMmHc^Z412xb^Yqn~#8|g=gD6GY z^LZ4K009|u*UTm6fUz+^IKPXwrNy+h5f*%AuL!1t$4Rg`X9099)tB3%xzI&8f zV#`!FY6e`(hs~yp6ewCLEEb=!JGOTiE>sPPSF!F0v2X~Q20hU)=Y11r24KOBX1A-@ zeGov|NyLTOo=0SWFn753XFvXure)(x2AhsgXHYw{F3n$VddcLsZtzD|>0`Z5p(w7J{5rO2^HMe2y)&|y`p1f|q*^k|$mgZR+n zOaqiqWvVgIBbq&}%UR?d%HqC83mx1Z-f<;MGO78P#AdzDiIiNrd8D%Weg;5;_}0a@;nf!$zGb^OsM~Y2EyoC}jFC|4;rG_66&2t|7egIi z^*i@CJu%Y=HI5-M+0;XQgzm#0!SSVcd(4h%(^>Q&1=O{jm+u8J6qWn3hvZHDWJV-^hZKxnR0c zV25$_2Qy=B!e~KvuytO2#NvM7bB(Rnn56g>ay@<3{H+V z!l*210yhax&dgEkl2EM1*N7Jxb};+%Z@xizR`tO z9Lcf#M6c9p0#^YAv2(>E^xxBJz5E0Kf;)Ej7WCdQ`da*5eYq$q1fb;qFN8rf0&-hu zP)MWa_;>DjPXC$vdYv|Z(@p>D)AV@#UGh<5y^~7KKJmNeQ;WOClRW>^jU|_)Q<;Up zU!_gf-wkhiV&-{sJs+oaU+x9kk7(Op_xgZy-T8G0%#+`QnHO9`pKS|%$UZ*mj>p9^ z3!vi*%Y7ZZl(YNe?_t}{kM>kk{i-ZCRlcng8?%;8RtZzL2wn6MFu%$BQH~l^jen~bOl)RWh~aGR*rriIfrU0B z&A3be+J9PvE+pr#5|4c&rOr}4(sg3voCU}Re{V0Cx7Ds%5q|!*vhA&^){NiQKc(Ab z3GW^J*MB+_P|1S(x$LCka@*X;mepU&16hElYuLypB8J^ycik`Y*E8Q!v%XIxi{Oq-N+U<-~@Uq?<< zkD`%&!*5xM-?8{RyE2l&S$q}oP_drMgJo`zsCWO)d2FN#6dEY9zYTO$-+1y4(gbCP zIz|hG3n&MaKQ$(I4@p+SIVj7}zu5~hjAJY30ky=8^<^%QYv8}WorB|)*U=J{qomtc z^WIdT8n2hFo8IW}9S-Z4o&NH(eVS!l)o`f!+v^=o;(Py_*&`&HfcmTv@`2oSn`r`2 zpxXb(keqyveEX^i=YQQ>1OJ-(!QI>SVJ(yybUuBWNA1*bsE~x{pIuTm-_p1K*eEO&vj8pnP^+mUvJ$Xd5}htcJc?cIotM5?zAj!m7*d|eHp%b6{dNu z8l$e%yd+d@Eji!Gc2(kfP4|A6)Pu!s4S0bq9p6muj33OGc`V;QS`J5sO8GG3d5a7c 
z{9y)97a1zy!_$Il>$q>VBA{jau`*(k3#ZjT6^U*x}<+!oQmmj@ESZ;ni*k2oV zC~)43lF|TsG_2ax=O6XDK~>f;AzIT94=ytg*R0Q~og2NMkTJZ4OE>gAf`RXuS&;=8 zywc}_OiD^WrWCioEwq_Lc9AwuTrE0pmhn@#Y4Lz4LLc+ ztb@0`ufE@0w+VQJwQ;u)cwE^aR(4HlUeEp$W&J_%KdknqJZ>w{lCu4t&Y5yIN-;P6 z_;P?Xt{PmgXuDBL1q!kE>R%jPUXX*JP=BR25h$a;y_e>7&p)Sp?M|ZI4&(@NO|dOr zZSw$Yj3c-n)B@ssHoY7du&Ln`4&OxKnOg#t3?4Si1m`KTCHZ5NM#ff1wh3#U%Yf== z>@Z#OyNo`?th?)Hq{*FF%hz2#G7se8bCjFS+SiJV52tr6Pah}un{zR*B00V8 z_p;mTPx~UtQkMr^9w*K)7QMliA?lv_mzOJ#-I`6RZyHN`OU`f&>0vx}_Oh@n0p#&z-CTK+Q#VMmx9W`Nn>KvY7c%T4;Z=jQJ6bZ-26gxkU@# zoR;v_c3Er!usB7pxs6+zkaNC@=Xd#cVS+*Vg=oKQ8Y%wo}5)*=vjWKq@j1-cMg_$u{9<;H6jgyV# zf4_%Vnl@TS#}ys8Pj`Xz2b$v)_Vsg!Jd1%smc`r=*edl6>6Tm9A&9c+=hJ$lJw5FV~Tq~oRcarxsaMn7!zuV&2R8Kg)&2KoX&8yH4)~py( zZr0Qju5E^r9)YpxJW_Fhk2fFb2XC%h@vo(CwAHGX>cz8ucK_@L)yu67M@w0+rJyg= zSRAM4*Vw%R4?l93KuKE>r1l3e!)65dI`NShsLj0jEL57M`cmTls@;#jiQ(6ivC( zwlhqn+L$-6iMc>XU?ZfR@U^}JKliG#qs;vi$p&uLq7+nR_@ij)4rYi}X%o~G2uL<3 zH$WK#2(E(TfaY`Err`+R(nPBTgu21~_`SF9T98QnbLq@;n|=5Z<^o`cGYeAW z>!GWt6aWpNPs)6u*l#5q;jO+zf~+nOK7%I47j0(f@gvaEWjOKZPThN$u0pVCtk;`8 zZ~qiC@!shjI^J}31Jqu+`U*G*HGvG|^I|U4QlMIYk%ck}4ewkmS~tC6tMBM*LwvVl zYUqJf-|?ak8v20iR2x4W{(Mb6Ll64;4(J7~NT^c{*-siF^qfOd+Zq_8ZA*zdpb#r< zqmmm`N+IEk!k=?1I@Fv?q2J)Fo=~fwwwj#vN%@PK-aIcV3cn?sprxUm%nvuoGAMWp zup%1{FmDGQ3rjGfP`ph@8XGaNvZ9pDLa~(yaLOUTFoXiXK8Jr0RyE8N^T)FWpL7FL zE$8dAj`K`c>Jh86qfSL2*DTf19_NpQ6vZ^tNG|?^ReXE$m4=63P}l`q+iRmJggZ77 z_pnZmJ;CuQq#y5WBAf^j7$tb?R`D+c%hp17ql58gy6rv0J$aGwW>&Z2{XSqEPKU1D z%-XsECwI6lem*H4wscB!D=#`aNmJB!LpJThwjB_%uk5T9VY-5?-kY`oh!8B`&7&_P zTO1uhhd2$J?PlqiKx=?T9I?z^0RO|I~XEJNu%M_!`TZ35hYVe1}a`;FRvN) zz@}{4^?x12>yhipT;XAcVu#P}gNr;t7liKuahpfiCY$u?Yc?6AoWFRR3uFnYO&~+9 zf0bcXItSHbPG&GwV3SYekqZ?5fmM=mA+ z#2vFlE0dUw^5=q_a1W24MKIX--3OeLw^vsz74simWDWUuH_g!^;N{ObXwfIX8{9J< zkv4c`&8_nL$!O2mei#`Dg@HY?t`9~mtJ}NFT06S|r)d?a=AYqVd9bwAJHF26=2pP zfOQ0;&f8ZOq8Q&D1ERy_Za+#k(F<9BK$b+E{mEc9?oe zabt*B4}k8&vHEcO8SQi>3G}bdULbT#v&CGES`3|(LJfuVGjy57)sUPmjn1OzWEz`% zdF))?AZ1fE`4Pc^-33eihP?ZFQ=N-D#PJsYgs^=wiQg`pEQ|gly53p( 
zs-r;^%CI_&>7CSW-EQ3SPjS603={gF@v^`Ewvd0~`-*wwJY^tm(i@yvFb{mToOVZV zb^RT$@Nue3$1zo8u1sa*5?;$f3;~+WNliuKwFps;CJVZ!KwI>6Y1j_~<2kIb27_sj z>43I)K6ruhYd`DJDXfHC59-q?e!Kj)*9&GtmUjq*bOo*?J2f55`$>LlI%B*9L^5;x zvx7R~B?sqUZ`u8`(3XP?yw`yAnTOsEXwyvOQdrYMls;U)=s*Fp$o8nLwLBf1qtjm$ zF+mt8%Wm0~T%tEaa_7Gh6!nw(%i)f+1wq5jz4Ech@X@R%DntBBjK%lCP=4gf1akD& zY{>JJOC@%BQ6D)mL?kHXnGK<0aZ(zOGZulRneJti(I6V$VIc#zWdQx|!3f2?e-Ces zJ+K0%RKslfThH{b%AU~e&q*phVX>=vH|!ac=H0UtuLh@?T*r)gB@Hdi#r1zI{WNyj z4XM)2<;&Z#H!`e}x6bN*Rgfs;xy&4&`z+Zzdz?GX9NPjN4Kn6}h`k4AZK#>z*fikS z7L`WTco##^p#$v)oIr}vyJ$EIr{H=dEsRJDx$JB@D4`#MV#e63t?o(zjIEwZ0c5Ss z)g3YXUI>2j@j3bo1^TaNpsoSVhCYMM!Hl!_5&K;MLbm6I*gTd{$-!9H0zQA7QiYLQ zohqXRrxC^swp{>*{p>m(`b@n7Q*7xu+0|}wvp|bPJ)jG_E zX?@io^i9e^C+r5Q5ITOr6Py~P&mh&w(E*Xiz0s{TV1)Rsa>Ne@72EhT?k%RSTOmJN zw{nZQ0T6E@mSM(GVZ?XEBxmSuAdJLvf}YW*j2=CR3tNqwI@>{sw^D)SrN%E{Ni>2B ztMP6kbPZW=FnQEcu(kLev0}4w{$9_>(bHP=or6x28j9_qi*o6~`9~SMnPV zL=%V^a<7ARay1o?hxrPF9T9TNP>&Q2#R~9UN}$godN*Z@;}@s5e_@o{gR1;_7w?cs zF(rSPlS+9jeHI30_Lj6Ylr-8F4KXx%uYWu|hn85;{Y`oM^KV*>wfXb*=lj$A*QeX* z_1DX*dcxP+`PbL=_UGG3f!D+3^A~sy+Lhny^Yw%;Wm%eNRsSzw$81PZ;enBF3cekRvZMUo^k|HQ=gA4n4dvTiLVR+=PkB$@p3V6m}}^+r|2a(_g4wJ zK?*D%OnLnSNQ7_q26Fjxld7PUyhVPt!sTRYaC|uqGGApZ^~M^|>xjU!R1sBfj5T== zkt)|w`!@yu2i#Brqd)1gEU1f4l!*|P7a8La6+uHKywy?xx*h01oz@RR2vm8v226Q2 zEMs5A{{W)!v8v<^R23XH?#)srv8@AG6^MXHcLzht!&@p zw-iD*h=a|Of^v^w7yfooMhxJ&GS7drF)G$xrFj!tv;DjsJuTgTS)h(?$CqUx>%uN& z>U;bb(+|Cyr`quNr!4F`#6WbItHWPO4sX@)f7zs2N0Kh=Lwde==ys%U=jyGf#6@S7 zoR~Xg;2KcgX2cJqu_{J@rgEU_Qmhm0(gN^0{HqddM3vG8$HbEm>|((|cvK8$0~2Z` zq#!wz9=f3y4C^mln^ke_|C=qYqzz)LBiMz3+lVWTirVv1pqFw$t^6oR4nysjP5CPo z`wqw+%dqUe34M&~k1)*pANxuFSwxf@CW5mb6g+r03Ate02NWh>ql9<~}N8twz$Sy&?n$DPq+#SfI3# z`i4d(VK-tSxv0wcfmg?wRGC8oH1yL~3>s!9(zrs-JF=2kVQp(UfNw?|fNV^VNqjhi zEpMbI)pmg1jJOAh+R1}INcGGs#ia{NZr56uoW+7UXCM?j8VMSw;MY1%#f6mfE$$Yi zMMn^dP?Bv~mJLbvj}!kyy#Ux$jdQkPP^7-!gZ#(B#$||7PO2AkD79C4-oVwPf%Q}6 zXG9#zH*+3lb*ngk(j;(ZoVuunCF8cf38E$?5cnaNwXI@Xz&v#^4HDYM6<2M;#t}+T 
zPWt^d68Jxiyee!yG4AIJJdG0qNG27J2`ipufY_cYc+OPjcS%F)-Qrc1oryk0D}fDu@b9(z3kKl+DtN5oQI{dF zz4`2%G=TnzS9m-?*E{6gD)s5NZK(E3Z_+dcp|bnV`P1+B%EU#ly!jtQUwG}?(%R|) zoTIy7Hh%Wt{M&;i zE$VFR5(CF~&rJz3k87na&p$o_o7>Y~3AfY90p^!{KV&WGvj*!K=SJMxr z()2@n#^x0#hVQE)$p-l2iMoVeOL(WxQM{~Vsya!A$Ui^9{)=!y`j5U{U!>qn2rcMh zVE&QzQB3vrJ=h`zq^SDA)hXilfvmx?O0V|ji%s(vce3w)Jzc~iqeg=w<=mzq{PgB= zjrmG%5jf}bR2IDjk6^`o@RATAsj4vo6U0rSyg&q5i3xQHVtG;iw*iR12L2|-c6{K) zF7||s^^o@Z4|$Nd5f=<&*IdPS)b;L(5Zb~6J zdYR*QbgU%Oz-=o5j#XAq{o51zA-g`;H$cseGj^XBC~hZ=Ykp2fnqDuE}Za7nb+ z6f#A0B*P=|SQMZjWhrXkOn2kFIw^#K!leg=dzga>^7}k=RC;1fxTuR6)4IlBH7|^* zP4gs1D0Qoo-LPYj0Jx{rFf4k8g99w8&r?$UOSXibv)Z>VX;G`89CgIUyv28>Nw>6} zX}fV(M=gFNJZiU0=H?X21PEy#c>2)Vs9tRp!_uWx60h@;InG^;H9rj@>z-sldwbF1 zEP9~Xwdpe_rBko}q||4ia5OM(Mzu}+@~iY|_zT<7ZZ!sKl2(BwOYCBN$R%)v0TRV9 zio!cvTQDbuQm^|*RV2jzDX*dn;rK#cpz?A-_TH0Aooi}HDJ?K2r1GidNTJuO80B?ImI zazDo>sTzpH;wpmod=eI2dW%Ragzg^BWTezTU-=%k6U&oH3gZr zb@Vr7fOA`dO8jl%ZvHrz{P9*zSLV31kB&+V5~*Wc&=RGa8P=f#%vBse&IOjQHxGBJ zWl+=|_?l&PgC@!D<@>nuupFDkWqt9ZC zd%l;Xh%A2#J>IL73Iu?+D2MsQGas+Nfxg0YXpouVl><6(D@15)ip%;(5;W5<=&y^N zay4Xx=AodKjG9`zL;5#{y=hjp+xKv*6qHLk+~!t~Sr>+gG(^(Pv4OsQV{EK#ffe9Y zpq6Gg7ZZR828y}v$NGJJl8RtFhbYcim7e({y4W(14c>J0tZdc%DkN{!w7ERGKexIEw{5uR=&nNH0yMCwy%y$!=Y*ubXVk9 zx{|&K_L?2DZh}fORbxTvG4>S{EeBKGv2a$I4>2WCmYaP}EZEX@?}$TN?P!k7U4ZCzKd+^hi>TN#nh5bEo*>7e(^!P(ei z$IWn&m{{2u#LXQXorsuNnYjMGan8c=|GsZ_jLgvQV+m%4yOW0Vit?a*m{I$G`eSqcDVv@BZPRT3O+(|hBhJc*kW zJg&5~W+!=@mlPO2%`4dIoR|`ddp>SU-%)C&5UmO`F$f=%hdSdbx)!MsqBcbvDCZ#1 z+$tOMvO#b+ajuzz9=U$x-?H5e6}F4wFvy7juP-&p47J-aGa5CF>9~|_JV_Ry?b9!3 zwd#izfP(f9=5!O9+LxIZ(ZN+6c!CL=+`_mZk-6yBKDVP%R$glpuKky`(UPBFHA1+< zf=fCAbACa}5w+RTtv8e0dW$f4#gwf%Dn*3}J)fg4W_Z4<^h&Y+tPDu>b}{JHu}~?> zP7iM=x>cwmuBV1ptC@O7t465qv8?C4lLwQk0MyhY&PgD)B6}(qI?q(|C;zCMld#Oz zaXy+hOGynHi?Xt_C}`DTH4rS|E?uXuq+f?`R44PEob>dgp;;k0B z%)k)m$BEIaACm;N;9KMdK&LoA%iV}ld{)a_>q8n-z=3%lQ9wy~G{5H4DebVdF zChkN@R)W?mkN3&j?xI#-JEa04i@3{M` zp}xMiRx>Z_nc=HApl4oN$yye!&}1^e3@0FF(W^O9p>2}JwY9PN`Bk2#FwTS3B%u0d 
z=Vyp6B@@WHKMF7uh)q+_6m(6}$CYuQUn-AW4OK1#ZB6K_5y!*Fn=HERf|iy^TNngp zQNHG>%&ycXls{WrZ&>VyI&EI$Dc(BhOUB_GZCff}JjPDt&i*9r`4}mIp1TxR7&&N~ zq5W(JTj?sG!`J?BSX`X(^6>0%^Yn7I8unPeuC=D)edjn|HbL{$ul@XciljL~0JPCW zcA=^2CX%A`IjgpHWbz>xnQiQ_BD=i(w(=}uD#?%D*jLg*OvEA8;Gwm^&gyo@Kmr#e*1}znl zX4ghKjf4iLKM1?1F7)L=F2|Rfv+C=hA4&La%80f4rv-Ppb(|)g*7@{`YRq^axU#Uj z<1ZJj3d$YdMA$##;Gds8u<0WWF9+Nm|JYp_dZfp$rAp-q$}XlDFpD^$0#)Wn zlV0bwhT}kcoBMdV&MR#9c`AUg;}9Vtaq&8-%PcdcUcKt2L5OzRI%m#dhL+RBI6e1+ z$Fg=?Gpv|DfyMBt-}^{$!@>wqkBvJX#!(p8I3*uYQSqifbnYwj&X6@wHH55(YW9$7 z)u>CWUHaiPzKG>rHn08KFrHrlFZktrELE+_vgp-wlwD-il@>FtY=02#{e8&Sj4lR2 zrd1w$l9E~JvE)-|o)TEhcu05(lHVw4{NMHc5a4No0@=DRLGkGYV{`*u$@-?u73bHV z5#Wu~)R<_?aOVEf%CIr#(RPxMPI$agyGd{`RT09ZgL-2ZTWy(bymXmp>2WZ}o%GfZ zW3e#@D;f+EX>l-R_wHz$JL56BjYcb22o_u)J_WtUA*Tr%1i4^oIf2$xL5OAR9{Yzy zZj@MpohJhcjoq}F659vvsc$g&wL`w3{*m5c=+gbxP6IH10YX$lq9M7eSI1+=bF% zfQQg+eAgM*p8foM48}nx{A6u6j=pRhrfYGenTK$|ovp?*;c94i_m3cN%A?CZL}N*i z%lW0j-1za8pBsDh35cZr)tx6{Sc+(VAL4?xdzk=vrwVteb(khlix7E0J))L}V8ECS z0NGJRZR972s&2G(<%~MEFsrAZcl`bo5(s6o$F(`?Zd7zN(^B|}g`eV>B4H19^zNm? 
z5dR5SUwgjoFCN}~30Q6nw(M7!oI2|!;4<5VBp~BuP`mkQcr`Fxrc)%a6me)i3+UDY}GhPe#f> zssmUO=ll(3ELW&;OFnkZEb%)pS=J#KzS)+ z+i1ee(QR*Y{oZu;CL6_hd0V@Wz3bz_V?z1KdG@((YnzYbiO6|9)wvPX@6>LXQAbZ zV}(1f1HBqy$q=B?Uz?ya(ht-o^h4a8Je=-EW<-`F8}T6ou)>xZ+8;--Wk@snIJuVC ztuUSQKw@_l`%X$YiehU8*GpXU*rAPW9lS5>vd7$s*v1JC2vEU+zlUs05^)n{@~0;% z5EfKpj5nbqwjnuT91p2L(+}1}KwvPLP{8rM7X{#W;CQZRWBbJrq<}pM^t`0sj4D*J z_umMPRQ(7-qvRk8~1aB z97Exh*uek)0RT?LB&w1BVFlg}bAz@+-M9Lksr~|C^Uc>NDtHoIaPkyb9u${~d`c9T zDDkikq$+ci1(XH9>yR!aCt)IV@Z1lQfu9!q)L>_XiLk(1qY!xH5d1*DaVi4GS}Z-2 z$^};U&I)V+ydt*?y93jg?wuyAOMn6B+aSOKTm^<6jfn}=KcGY9#Ezyz#kcgGW*|KH zHO$+=x2I}2lwvslzz_e`EvN>Wcq77xZ&y+1#f+qS&NEG#LP}r$~VTO*P37_B2BQTM8ut(9b)fhe0aC$X@hwX9(IH)f?aQiJv?InE0V+| z(*81%=~zT?CMUe}if*@j#x^_N*G+t+K73g#yR&UKBEKuxem+hBAA{4TSD#`<-*L%) z9z7*5A|-So*-5M05N%Z7aZrD_&z?A=ac;vPpcwHt&gWW@vcrZv3EYDyO|BgLXL>gN zMhhWe!x1{;|6%Q|gX#*JeNo&A!QI&e5AGTqHtz23?rb~|Jh;0BcZZDxcXxNU;PUv+ zx#!lsuil@pYE`eEo}O8~R;}u)>FN13+hb^mtg-}J!Qa+VutMUI--aITm{kY%|uh1AL6MRE*bhC@^?&=oCJGEiJU~Og;gB% zH5Tc*^uI_dKD{~_Hia@d1&32aU#Np`>#XL9Lvsv%dvy6^#9kv&8^&saDMODi_0#z4FMY&MpHkE(5p(73@RjsFUZY23Bzv@}RD1!vc{F z(c9<<3bv6n<%vvZ>ZCw;RWz&pF?tK*;W1W6MRR+egwZs;iM#*sK|E|@X~q*oY@d8G z&+J&5#e^6e6@lp=9+&Yn+X-7X#{%1f47Tk|{6ip*Xr2gl7Tam2)!`T)bam3im`ias z+e7BCg<=y!g(Fx zg~Ek)vfakY1oMkVuly92Xl9qk0{afu2<^$h}JG|6ycgp9G_RK#f=+*83W&=LiyZ<+Nx)=o80d>UC5a z$Qfd&iEL$KYc?N(qOMmr#N{P*$}#61s5Q2>fpzbZGX5+-dH{FO8W>T9-GV8W&vJ+g z^oFkAAj9I%s@63R)N{lO9T|LOWk&xp780xo6^c>1SNFRF$Oe5o1{O>BfLH_>(PJb_ zTTFT3f&}w3X+UY^rd1QGPgvwdRWxqm(neVi|04b<8zw>(m{w0@z}2hJMJrn+$`q)k zi)EQq5n%A-3_XbrOPdMin^4(N9AY;9lr}KLAxs^BJE;A%H&MB`A&|$;P?<1w{)SZg z`MYaWrbQTG>duW2H!q|V)r99-0@7Myow``|pJa1gOOR(FKAHtmq1w*ah-pm9>|yk^ z*FW$zMbqy2k=MYb=i$;d$rZn>M~B|2=dN3}7T*S?TgNKTWEimnkaxp4)1oHE1Dg2q zbi7`EP{%QbYxW>*r{j|2(r~=K-K*YegEQU2^pPp!O#C^Or-2kiU*5;z%Vcr0SCh6vG zOuMZCskM!Hdgqf82lMQP=D87pfRgwq%{5eX>|e`D#mh0D#-Fn&um>iP(mabng;o?MvGTWjo}Sn zT@<+~zu$hUmy?iw@0`MEK^x8QGcwARv#)lZV#XO1+|I(rxGp@q5k4!F-<-wN{?KwB 
z8mCTN5aMEUGW}6ivHfVo`QT?d;#GIM5d>m2hZjVNvg1@mx)(%NJ-$MksAV1gCykGj zoBKZ>^BahRllwc+%-+J)l7)MQk^crXfUN z{QApOI(Q1RE;S@M?gtnmo=WN)j&K2uzR%zV_S`rWM#a(Js->4kA4ZKD6A|(czQa{S zNRiGBAJE5(&@ypGotF{kxBG zjD^UwB-I9?B-Rwanf7z84auK15vmD^%A1i;?0-J4=MYMIOgEAADV!@F^WDiJuCw+O zw|*S|ZgHx-bVwx{_MGz_4?EiSIXWkY*Th|4=<+S#=hQV+iQk`MQqaGnfUd(9deQwQ zo8P1;N&Z#nEwxl^7Sm2rydh2prMX4Y(^sYGV2T?*splI(BGV_&KU6Ca|9aId47ui}ukTfOd9CB_S z)qRNV`|(!Kc+X3eR93~Q`f+Wx2yT36a(qlWj!X%v*tChG0cva8FfC_%TF{tADmDm%&|kPL)_nCm^7^ev?#b545uZ>c4Yfd3hISOFQy@qt*6Y>#%Nn zFMS-BEKyjBDRb>S2BG|U5$`aQJc;z{CP98u`ZhAVA>h9r0I~MayL*^sUy^RnzFAu= zMt2;BNxOjJR*<>`;q(v8e+O4Ob+@opmswv z`>aqP`0O?~$ro>>L%X;&oLK%AJYZ$Vz93~F;n%28Ca+53CHZSv)gpbuC-WYC-|exx zfY%PfED~*q9CT`VI%J-xCED5eui^v5HD-h1^KqvBIPTzoOPJxFgur8@&vSm6Ibc!I z^-<}5q$g#0`FbOe%b}Rv4?6Ao{Jto>4-biKn_^lY=S`_h#XKu2ds7e7y17$(#U#(= zTX_u<56W;Y7^xAHeY*jCfdDW+_=_rZ2}F``d>~OesKliL*m#UWSOkG|-nQ8n+f zP~01`=#OO=R;SuXj!`f(37xt1ed10CiFlU82cI>V{+$ zkSr|#YDhr0!7%>VaIQSZd~av#`+V=!>a^oL(0R8X(PRVkoZL(5#3FPlFh}^%rWx{( zJS+0^l?~Q@9C{>*uQYu1l53cj>K^b~QFE>9`Id0JX&1ZsdcgIWC-ON{#(%Ttpv654 zV6f1^WaNoUaUN*!aO{hapu@;y`a5Vc?SHHNA0x&cKwS}=uPQhH-<_W}e(24f&=TOm z6+)B=d*pI`p45V>RsPU<%B`MmsJ)Bn%Bp#Rg( z|E1w>Y}0)>X&?A$d^2cxuRG_nzUfm&b$^x=B4wotLY+NDf~ANzf9nu~0G7N!DJg0R zR60t&7#=^f1iOI9(ZNW{`Of1Z_jGx$N;KHavbZI7^nswvJ#>Lf?)k*1# z_#4;uYetYpxLFCiObH8nEu}hOynwe~`^RXM=0=Pr^l!LtX4KIa;q#Ub_kS_Y;d%ej z#sv-KcTG!*K_slwH%i|5`mvk&Hf;}0zUz~Yy>0cOE$mvXzu0W@3_qM3JRO~j(Um;M zgu6{eZw-KqOx^rC{L?NLQjXl71^v3;Ti-+9i{3#=?~U)#@0I=#l%(R*iNfnNN#6Jc z%<0aw3Yud1M@+@PSXpElaO8IQYo&E*-NViKBmw)~Pz>EY#U~--!hEeIh6UAOxNm>m8uMcBGWrX4YB!G-yX~GY zpASIg2FO>Z%f%ZYs?t*OCsdz8uYogt2A$Mpo`gLlk4LyR)AZ#Eoy=f{miOahKrUpgnO#KSoL*q|zM*=`b zRaDUd{$HbN<()-@f@m?u5F`cKL&;YSC(VY^tg9`KmYmLxa){3t(SkApT@(kRj^~5P zHRMNTQCw=uZQt4k!T==;-}M`6E)rpHZ4T#EHO8~pd=|#=&58gQJ67VMDkZ>dcBkIQ zASaXCTW6QL!InCS9Cquy@a?p|Fmq7h!)3|>A4gG1!B0kV&JhfJ5&{e&EIdQb{d zma5(CKG}U2Uet?5pD4DWfGI*3;v)A+e^CY22HJV45X$(>D1!TrkQLo_$wbiVE&r3O 
zjn2)83wkvNJJn#k3h~VMXl>CS%I(?-v@@84nw^EbUv_Dn^vlSnB%=G;)63wc1!xBT+(L2@)FErD05DhywIMj?Of62CkUpO;;EK`Ar9 zOUW$%jWPPsH51a57kXsiTxUDlYR8lw8B)?(6Ef?bZu9Rq{kqo4^oZGneG=wjGS4_& zNr87OrIz5I4(H9wnH3m?TV0=S_NcDwj39%(RRM47r?nC7VeEOvm^EDxN%PLmuQryH|Pp?bkU|!xYPGe|{5$M+0zF7Zv^)FrjZ&H}YRtlC2mf z(Vz{ho>NlRqs#f-w{v_8VzN)MU7Icq?IKpu#!ePM10vR?SgLob3Nmt^J+*MxNs{*k z4VqEyV~&%UeMeVU{_BWqfLIpuD>x+kH=Dfqah>%9w~ezE>uNRo@QFlkdvOq~#ju(QXNcUHFYi9;uE{SNeC+@*A5Q6|LJ+2_C5lV{h zFGP1VxyBUkKg2~}#>f}vFqXM^76xv!m_GqoYbyFZSGu&6eN}(mvhnQanVOb|BnR`P zUN-z0v@RSZIF8wCF{wjVZ4zxNFDm+zbce;0R9V^k^&rzlRzM}Q=*y&T38hw3y@9sP z*H8d%%AsN9fl0f|9!P`R(RIS1%+kgodv)f-xpD3=X)y-eX6b~b+Q?m;6jetM{hdzB zPLKSOSPrv4WXCpShNgXOP}F`c)YGF0y@0)#c*aNl-7B42S(*Z$wGmcs~!5 z+6s)p*O&BGR|kV*vOP`H34st?MEo?{N-FpzY-4xW)*wA2?Upd|B&P5D54pBAxpqbr zx|%xZ^+S%$5eEx~DQ+p~WkVWu_<*A7*1n&~qsCcK+x}}#5Ah@|qcTmQ=sScOi9^X@ zTB(YWJRuL#*`Q9DiEg!`M#lXj=Z9R+F|}8uSjQ;p*aN(19QY8gA4K!Z*@}`)cNWqh zoEPXqp|5NP{mA^bt;bD?d^PAMe-q=>uC!;(qJ&O#$c#2H+E6|7#nQS>jr%bo61Ih3$lV$VF>qhMu zZ}X3wI{^cVJ^m$_P)s%+EFw90Bi*EtG@@*IqiXV<&xm}wEY4Ssut$f1QcdPj`IT9R z|29T`!JXU8|vElPg$uu)h_BzJm1&YF580P zX4|2anH|e(yi-onP;^1;(JEmK|I@6PU)CV6kgE7^N7e=Qg6TFDAmaIBXl;a0@_Xym)CgzkWJOfy$y9yNp^{MbPR|c2>+3!Tm4PVo{CKg7Bo-4 zZ@H%tTv%-w^nJxKvv!En@gaVtd*mSPhVsXM#ds9ILf$&;K?@><*hK4!T{E58C?pqV z$@M6u+oIkAfPp1szjGDmaIwk-fUO{5>?s6YlvJgA$i4ND@Sw95t<_-6eoy`>wj^%R zh}k4db>m75n?%jZema0UUG0yEeHjLo9d|P@}g} ztQKQ7kw!URF``TcsAOD+k&u{L+7AO5pjXQ6=LQVW#aD}EW38MJfN={G@Q@ z-h8n_40M?gdh}wkQVen#7<%+Vv0MyDnbag^+C2I^TJl7)&vT(NMCvd2^jE6 zk4h1wiI4gbmxrD<9ub z)OJV!?zGdz{qg|Ng}U)ijMb#2;t-7pbpXH2T2X(N%vxT*I-rThsT~8Pw!;aqSKGk> z*sJeQ1MF3I-~d$`#`PGBv`&>6i!@G+7#g%rMHm`1POTUzv`%FhDKt*C7`U|8xBwQyR+6Q&F^VO`dGdTevfM*>&%-nAlpCxp zyxL=%w#UxSN#J>dRZ4u?V<9+PtQlts93GgbtmP|>wTBymtXjFOP%P89XyIK6W|fPn zCv5gqp=UANvrptk;O5yF&9(c*aaiYBvUbjs;kCi+pq#Q~D-xCHW*w%|bQLvV63``$ zE_EdBfO-7DyPlgP!3s_VUxii$adsFuo3;l03!F{;m5P*B%qnM;KE@tlMKKFge64Q- zSsGxKxyVbxo=m%GlLawytNks=In6n>j>iFI(S^m#sdF0mjU{^LH)WcHLA{5Zhb7Yuv5>-W(cmlg)3? 
z6`Y}n^1P72jCzM!(q;bq>sWT;B}gTQ?c8Ez&?|@KEg>t1)vx!CWmZymk2Lc+PKDI* z#IN`vbz{H7BXz_4PzMOI@J={JWxC-l(Kqp)U)?vIqTgVe75UEA**}4srf=w-ear)5 zx?z~rcNgrtW1m$4Dmce~OC_;5Gy%r43iLMoJa*8%F_IVPZ^$?nneYiX_IKYHbp1Xn z0BlcPY1xyPUmtG>w&I)RbnwL)I*rbOv^Kk8UjOU*ZI&3Qlh(w%ei1$B&-gx~WN*@z zbZc+amUc{COqgbG*p_X@HtR-orX2%vMxC9{^uV4?0%|bpO<*(a&J_VunDxfIaGesn zne~ReGE$igd2_5NXT5Z9bmbeFv-(^)XW@$fZL}aw@j9&OPH6!xymzgIEts<|qFsX1 zJDrxI{%Sj{DNYd-&+GbOFX{|8^b%^oYER6kuw)H1B$h>##g_$&e&0SRVVwv0?9z!u z@+?s>wZ@gj9Ag#30&iRO8E5(JwNN#x&HwED{y+an`N^H+antmkET-M5BHsdx;)K@L z*(VA6809hc=(6y#DZ*1IxqkyAe8B60kz;{nLLX|&-&{?kmK3;YVD=5dNS8pGtvT-E z+Ox5Tfhx#-FB1?~fOBW9jR?=ilDT3N7cVJ$*zn)Pzw9f7(GTt15h?{8N=3QxK*|m! zXKNITkvoZKSkzmIq<}E*f)Qc-Os){C8ZJ6i_HfxRcM`)fs8h!T?q3 z&^!sxBDE8D-Hba;-N-%LodQ^7OY+e_;L>M{Zwumz@pTizfbx>Qup6ceLJ1^sKUlxn;6d5bo6k5}R8Qgb{?;fZqtd)VtyXuYz@?K8Rnbui&nDue2X&n>701 zSM(vfiS#XeX!+;wDXyUIZnx!EUBLBV{XHS*&Dm{aI|UIT#5b6`-ECys*wD#l0Rnv+ zA1dE$JG7H`)N2wr5-8pCb|=5Do)9fUXR50pKOcRUl-f@8Er!>eHlEU3!jrG24nt_YJ0PtV1IM_h(@<;zF7VEOR$Pm`*6FKPC zmQd8bDGS%R+%7BIm``@HA&ZvP&Y~q_y~M%Fz1pqXy?VKBg}6;Sq^hYZyT*I}gt2^6 z_x#rv1g(AjZT(iWP@mQp_J=yglGe-V1(oQg367CYSJ{NsIjq&Wr9#juRTF!}8Z}F< zwB?+sRUOx{QS=4iB8*Y?bIb4K_W=ly4JeM!23#^tTSg#Q*=%;&MKt;dE;SlSw*Wp= zAB;WTlK)SB7YoavzAs!ip z6(4^dpH7bv9UE06Zh139lGjcF_hmE|r8(5*vsE?N-n$+Dc8bl}$SuS=hx4bk{zplO zb=6cwrmu!&H{?_I(IMxuLRWpg6DLzWAwkx_cjcmdY3i&QdJrK&e6o<7M0%!VS_NZe zUVV9l$%It^2c1R1EeN#IpR*qD;kPTa$X2Y!7x|}wEs!l>HqhLVu?9{PwE{{ViauZ; z%(}&HNIM0?4uuLX6Bek!(}ZDwISEh+fCc;J;-tdQLCgzdD=!m z%+XeZYVhpBAP6xvC_yJYZkSh;R~RA~e;9v=F8D6!FA$qhpB@X^0MY=;03rwaCD0FS z0Tv2W2vi7&4U7!{L6Aa`LR>;$LS8~$LO4J>K!V^mU^XE1;q{^QVf7*P;q;;OVe}!g z;j^K$Veg?PArzn$AQhk#AY!3oA!DIpA$$V5A<>{gXprrJ@PY6FFM%%sK7l>~dcuS? 
zm@R}$khbtmkovjUR}dbM9#9?-I)RMfbFe|+YJg6lPC!OrMgU$dp zU^Q?O7#q9~{y&^e0CJ!l7#92lme}Io(#?hChUX?+f~bIBP?}FQ5B&GN<~_x}2$B$_ z`2lrmVEBey0s1fDUyM)dz$9cf@Cav+Q2)QO|MQr}?=|iW6HED@YDSHvffLiMDW(LM|(& zEJQb0yJDTA>b<<5M~L1@@4@b#tzP7z$9%K_#^EbyO_xHx$W!JqM|-UZWq1j1;U()c z?@ugUlV}EW_B?AiU*$mL66=oVu%6xMKUdIpiXvTK6w?+sg(V3%x{x~Vuk?O82$-WF zl|`9P0n^3zRRiBOlo-bchcbV-HN@fImx&K${Io$;c8Tid&@wxsEW%Ogyl$o;EQ?6q zDIsa=N%`$nLU2sYiz+l~$y1q|)bLCFrK$u!TWv_os4xZ6#*z_~J$CFGmWNe;2;_}i z*e~JWMVox`ZQBa*-c>4-du)w+4|;NB`GnP;*-ypQn4TZ>{Lc2O zG1KTud=NF(4wHR~0&ux3khFqdm-d)mAJ9}cp7Ru&Ynj6{(Tru$3?<24F8j;xU*Z{+=Dj{eG9cfVA|4Vu0D6lHxGWGA^iF7YhU7GXb7-!iZGYuN?) zuF{oo;r_fKv}xxs{GuZDvXC$8+hLyi%?#&C%r`Ht^FmHg--nIY;H|^$Qzvn@o-5jV zB>oM=`GEW_vwz~D|0Btr*8`rQybus1s-3(uS8#{tUi$5r);m4BNHT84Mwe9bgvUEj zzfeP*d+zp*w(92*&+Shv@{gFJCz7b!dG-wRk8d8$CgYh>P7SSZKksK(KvRQC(KG|- z&h*(c?!0Iq(p6kyT3TByWA7YeE1wsSXQ1Pys6(m^+UC?dY+9`i=h1h=lv&!iuvw~f zV2alQkL*Q(W2VZERmf5;B@-7{slgUBcd(`DAT4!x$dHqGdc###M1)pM7Ct25ztSMi zX$K$mEs4y@`Y)Y+mJZK(%@2}j3Y*_&HoY&;9~cZk=g|b&f{&dk8-(28lV5Tun_55r z;KyXS;d_Gzcm?QW7d&7W%~~74(QXL8gZj@aKhS712E4yMmSuT1Yi1gD;xd+;6$e&pywwMXL{wh~dnU zva|Vc_M!Ptbl_{1SeY`z4S~IGUsQQ{vsC=DRi3f3f~56Gvr(QAqQGw=@@N?;o@R4 zBk9^@W#b^ZNHo6&VYCP*S6P_|ERkLtq|24^_D%=m5UV0jNq%4D4vyYJ_%PVW_@6ll zU(h(yC@Ze19W2G-s^G=L?D-oo>b>Ye@zRF+9^by1%{(~U>P0lcM5db7i;c)~Ga(wa zEU_rxeDt?_e;a6xBR%}#ud8dB&X7rsTr%8iRpUlI?|_5WD2rbm<2JB*b-c52h6 z$??S_+2cP`K3BK3V&YZ4baLdICZ0Q*HyerHIJ9dtfUJ<%JO=$u&Nx=vrlm?8u(G<8 zU@j9rG46C@@@KhzO~5zwESX`7@d8x-NZk$q+1BZwmElmozUc&)nB01Sthz&_4!7$% z>fk9C(le*1XPV~$gN02I4d-qwv#db7{L7}cs4d7F(2w~%YB>8?l^^npOW^_Xe3HVM z4&UN^fV#FGaxux7ZN<#vL|CWk^>v2hD!9OO_v2d|Hr&L0o#3%HS{im5y2}#a}KzxAA{Mn;dKt8;bdPK2})Dj>{mG*7v&+$PRB{Q%9#w3r3^UG82;+jQ3un;&0igaVW7dqm6z^gYo{Z z(I0*T+=NpWu4~Ak9e?F!iqxwTt#AOuZmfhryb^l2zpZHIx@`cq927a z{SbW#=UXprXQB$@*$bw-F6$UZ>i}NyT*EOYqjf}s6FYJJTIV5!3DjRLEisy4jtWv% zd(DoKi&kx9tqhRmj4_+7pkhVOe!q)i)t%UhET zH)l_GF`xK&$#LKVZ2}>p&Z0^QRlsZt5lCsD=ZM1rss%RT|DOJB`d&`Yw1=zb4Nicp 
zY&knW_&fxhZjL;vCcmNy-EVUZ0-)r4-(DjG>OHWSt_7OkkPVUWc)YNpIv{VVg0qT8>G~y>xU)eIwbZ3C#{Fao|%H_5G z&u|H`u!r00%a8jIC3pgK<_TcXf;M502w!msaVAF6yj8Q?Xd#enr!E5b>Uz|DxFAt` zlwl1mkldX`TOtpF6aeu51?e5F9QdXNGhsPDm)m1xNEL?{+-K={1+=RE<$?sqCz+Ct z588mVSn$9VB3p#&pjdJW!DSrsL}b+uib^5d1$?p))moLvF71}g`LjWWDihmoC2uC2 zmPDqq+1#TR){5v`W%U%?i)aej=+nz!7*&RQGZQAlJyyCsZD#*A7#PuDmi#v&le?8l ze@f`D_`;HtNb<}}{Np^ppkNKg4iOuWRVb9NVyq~$uHjk+&SsIlvS%&B{U3IrO!-di z2{iglQuWgAN^6{pz*=o`u7;&q^fkg^@h0)1aD55s?*)Y^jj{R*g5LN50pAI-_<>nN zf>MjucVPY@tuE?jNwB1X*#cgmJx`h@+A<9L*YAPUr#CJM2^RDtMEQq5{rG7?z4BNB zh7S#;L#2mTM^UL-2>}9G7?`I3?X2kQ#&D)-(Mmi1Li{-u1Pc=D*H3JL(hQogqUC$W zK`#FtzMPRN{@3&4DoVy87k-7Z0NA(u0b4~vUe3!@YyYEI0|TL_^CutV!D5Y-`8k{( z0Q2_1)t||C&yQ(~izAQLD}@(OurLHl9}g;^hs6D<@@&sz(`RcNt*d*TBz)zK6}oI5 zTp(@e@I!jcG~*hAzHfBos*V*=D=nk~LeM6|zg;jl=ic39&0{3P1k*n^uxbuyatHh;JD4I~ zS}*G+A!hd9ZpEB_zb5nPvYFtQ^rPh)pk!uMQL`xta7)uGMSC32PrZF{h#?cshf~UF z;HY`US0W5gq!8Z6os0o&mrU-B^DgO?^X%s3unqk~Ya?ZPfD58QZpl6TKCQxR?j|I( z-xSP(?+An=N`M?R1=oT|q1LPQeGgE;a^umIju13zp&$Zc7)pKCshhYf)cwFxpkQR> z{e5{egUz*(V~YDR6DT09aFuPi+2XguB-LiNbe908uQsg8@t~ZGCs2LXb0_o{sjIe7 zRfv!p)|K*Kop?x9_CDdXKR6TG`DSC+xOu1Cwyp?tx$30fH!nax4OFh>5w*-vEN7U` z0EJ2|b^`F;2zDw7fOthN?aKSsSSx+x z=^8hnNuwp^!hu9liTzl86>y~j@^Ow9LU@@TCYwWJd6#WHLKVsTEP)K%W=y(Z{Y|zX zADk!pMc0MDMhqlExk;oT@|_s6EQ&=Uc6a$d^uVi2Ovr36T4T3`}r$i~#Lz3(NIHP48 z(@gc8BZCIpY9-@y@TO0rSs%@u)a_DTb)L99eEFk|Mk*br_B$*L`3 zIdhEK33{vMu`p1@GmRVi-$cGfZPjWEz!mohUi|XZ<=@!0mgCfM+CoMk$yl~Ago`H2 zoNM`wzS9g!%iz03!)AIbGREmvIn>a}&koHg_OQLtVhgiaKCCA_zf_ zxf?5;(GSTY(3zqwBE=0+7UT&M0zCy zFKfvmi5u3tF&r*h$v~VQ+kx3_DQ?~SSq>SKDZ2-JjyZCb4hy@(9@Z+b18%y{c^|xn zIFw%o+lFh$glBLJE#qBvlbOnc0BE7ReiFKv2U-e_TGPdwf>6`5HHhnK7OSwy@9iDC zvt5!es!en8&%w-PFw@P%4heE41GZmQd5`ZRwD~)d6gYN0tRjsA{*99)n1gf5+s`)r zN9af}1hI^;RX|LIthq5pYO@ffSWdK)MIJg&kt=@O!wH?scP%%)@}|x+We~Z^4gFr4 zkYba+p~C&zFkjqe4F3XJw=Vw2moN{cpu`7A;HTI6AjKXC(j;~#COWQ|$1v(!;enbg zV=(&8U4>YF7%rci-Ak_Fw+D46Sd6+zSC!O>d!)P56!LHF)R)?1XsK1Bh`2O|dnzbe 
z_iMSGX)LUFysR<{I%y|6Hb^v273Ik$eXm#0U}z`~vuKUG2q#EhLs*jek>ppS#9C9p ze6G<@M#Q(Pbv^U;7>c9{>vv-8(Kw{6Suw%#>1ir?ItWsG`koYpUJvfy1|=QNoaVSQ zyxVgsFcDVFXhwplvy3icTzJ(3Bu{!SY{cFP88V#;msR7xGJS~8IzSVh*@@2KFT=eQy@6l&C@+E@sXwz2^>t79b zKOfFn9g7p!hRWlQYa7NlTHI`zYJ;>p7bemt)SS`H?s~47P4q>UfApMNXz;uzV?`yz z*Htr)71h()&tW_Z&>(>(zsSq@!7}O!*cTYF?L=(oJZlB{@pK_zOC3}1eoh}M4{CR0 z%a)zJHFY}S!Buj?9ZBj^YNjNqFi&Q%bo4j912l2mSb|bSJrxLL#LoGoQ zX;sHYUpTMK+LE8ttNkj?Ur;vOwShn^-g7#`Dl0i|zBEE^ZqP!UCdjhXC%;|3+!SjM z_{Id3+sud<%n1ValqnT z$WSkAYWOFJImmF`TZwDGrF2PeLQrbhSlyzN=A;-aCNkgnVa8T;Yh)SjW@gF)^c+h{ z!E%%Hma_Y{398n4z`ySD@mP10gWhgE#P#?mgQ4R!INKbxS>L`p(>^|H5KtHRP*Sj! z0pv=oN8K8TqO0D*W=?u@V0Lg6;lpmtt+>H>EKqKOs8vQ-{h-c=>48<^D}t$4)Dj)} z5Y`u#oF>@s)p0}#30#=MdX!IT+HSW(9dizP!+dN`1Z{Z=31=h^Ktq`Z_Y_;mbm#K4 zgIBrR?&Q+z^Wqu(g*)ISaKP#X@wA@YtidV;PKNS7cPJ)xO3^x1l5JApUiOWSz5a@$ zXYLjWVohtw0vm1$jD6Xzv-J(0+5LrwdqrwSW;#)7XqguvcUnsq6S@(-W!X?HFj3qc zFKzIxKs6{&Y&oOwmwLLsDZVK(#yn>bH&E;Lo!EjtA@hNb9Relxh#d|p0YY-tn!f9` z$o&&x335KBh$72UY;cxRc%FysP2d*_>nTW`v2fz{bi)i4^($ z87|Lmb`l(3v0Gbs8bkAq4?Q;S{0;4YZF|>+eH9BEMpag}5bg zE{Gyw*&C$%8Xg#5)@sF$L#D$Gad-YF@Wk;+U-jw>xKs5a;5AZtMa4KW5^VJdv02fQ zfM5~}-B?J~+fjiVp3?KLQd7IF3nIR#5i}dX*_JSO;jD{vDat3;Q&1vi?ZH8%rcX`* z&_B2Xd||!wmBJKqTU_PP2c_L!LPoe6i}!owKni9a6s8|sEyAGR4??PH9{_Y94N9Fc`XaSJW5s-VkCAzt6dvG}U4Re#sVf#rf$ zm9AbnLQY?A?k!@t)jo03ZNdBQ3!V9ow48OfQRIiJlkU^jNrx7Lvyt;|knODK3-8Mp zegr$AxFvnCA?_7~hV*+5=!Uj8{Cex4Jz(f@=miOu@VVUTS7#HaDT5A~I1G&)tPRqe znpnweCmDJekE@-n=!M+n>+2n~nfTYHofYBkW)qbOP+Rq2BJ;;=3g6FkyM98O7(1E7 zi*oFU-{vai#mG;@9Y2&-4y_0W_OAJ%1`?p$tz>49tb5sK$NoODgM1yi*CK!w8~D4M zXJ-wj??}tkL2bVrY-&@-Io=tc z5KNGGhy@eUb*s@h{}p?r45CF<`>keWr0X*NHJ9Yy+pSWX1+jCW9+JH3PW~yB%k5#O zQsW)(o`3juRy2)70up*??S*dl$vYgi@|A0C6I!zCw+XCDt-NbtgxtL4Zg^yJLacGc zYZvV;ie(-Po84}hfXzu5a5emG?eYgWDZYp#Zi}m^`WzLssqjZm^<)k;3cY zXD?@MQyW@<4Fk!KF6V3LqtUT+5P@o&xyG%Tv*F8bAk>ww_0wNt`j6#j5QGqIHTX%cw1ur1!hLhoyakN!>L{|hKezhudHn;9!Fa4W`>vzq_CqMlVG_| z8@W_>g#u+}?-S957wg!Z8Dog8vR_Nf*dhA7{!>q;^HY*O4%SxyBN*>dLE7exd@h)M 
zwST$G0#JC9eHYHA@-cqO8c=eTH!@ebr}lI<2l;f9uK6IfEAy03Q-TT{)O6q;8z||k*h<5pyC5Y{r2`o?2a)F~nPL340F&Dlmf0!Vkd>Pe zp|@!dDiJprT49b93=FUSDdLp*yQui?g`{1roGfs<&!I_EW)MGBV@kX7vHWpTsR@!>rl@-5{1GSBN%7=t zKp6yQ2whJHucU6KIKzkYmC90!&sRlw>X(RJg|Wb_dXbT~7LM)CWZDPxAex!>QyGOy z2d|s8MCu=uxSBGU?!RzHn(0PXeoL2M(p2XFJDY)0(DERulBq4UWV(8U?ohm>Isk!M z86qftnub=!feqTG&dVu#?6;8U!d^Aa!x3oy!Rr~ruTa8Fx}HJscbE;FRjLQj#rw^9s61g<3$k@7Bk$*1GC2i z8_zsMsY|$TEt?9h8}3iQ_OGWF$pPblWpKV<`898y5E;PiJsyUSE)nn8-zX3L75s&9)@+*?{3$;xh z!~}nky9vv-t)TR=u#WN&yT?}DcGdO-n_OoTlB~MSt##1NcKVg3nQ3B+^F1$I$~b&{ zx9Qt_9DFYmoa89?dGOHDQZF-Ij$Q@@PJyd7b^?D`l6AKIg}h@&5c|iOBCv%)u&`ct zUjfLD{!(##3jex-V7dp$8Pu-Hp@=>U@sjpRI&Z!?S$V>532_Of2=z$8`Nq)2h|Tyl zJ(Dx9&#kGMpQn-Y5|Ravg%WGXBi(31rD(kGF7~F-8fK+`iT}Bjfi@>;_J{|xVB#a2 z$oth0%|a}13fM*2EB}e4NVhafi|!g6MLB|`*E%LnoAzV>cVS$L;#5=BHfqEw-i zh~j??3E19SiaK<#^v{WewAf#OFXP; zVD3%&WbuJlS-;$Uq!BC2(@LvL^{w*8T)PnG_Y2ZF4^Li%iXm*oWU)?=g4twb?nm?y zRE1_Ml$B_1kPVp%+;@!8V&fxzL;;hD5_Mgd9Cpuab1mg0zGTgnqCprR7#vcpm*(i^i|>u+DTh34%-&|7j}w^gN9DRfqY+ppDX=+J-g!5#Pq-XQRB z=|a~;o5PtLsBa%j>I}A8nn(yYQ0IW-F`&sr9F(pezHq@> z0H&2Xts7Akp@$7DAj*H9EUATqHV>FQb-AGxwww{I1s!ZVBcjI82?u6wSuUm4x`D}D zb#W_d07HbS>P(VA`D-%1KzSR>|z1!ID*0WO(0Ky;--Smb{Ii*j+cKlY6n0ZTS#VaaBi zk_{mx+dK4KgSL_bLP5NJzJ{ONM%_yh&rtY_6i!jj#4F)~_3drC13E&ddfPc72Y}Uz zJju~`H6|H8R1JTN1mLF{ej)Rrx<8*|c)Hzjei%!Ynp;{f-ZnBcG zzUmAD-y^L^7BqPxVp}-n9yl!5`wxC`gK+GD#^Q7vToY?4 z-A~?4{uJxMR$*IlGs53whz~+84>AD0K`$rZ#lci|Zt((yGb^(gY=8SC{3%P1g58`} zZ@`PA4ocrdrf3>s4k+({5BF=qPCP|B9CRv9K{q9rhW^P0c!>>O4m8_PAIfRKHl%N& z$@app*)@OXO{Q(_Ccp{d10lFX-px+We9y~b6 zy$}Qc$I2g2lSWX#Y~j};Ycj1lFT&2d$Ulg%`}V$<#rA@C)5{{bgq(QKN)93S9Gp0^ ztY94lnm*x(5~x12MP9)!%d8FR)z-GMb~RgkNw|NkU2ziOGG90n(UI~u@@_$Yd%&5T zylqw6PKUwL+xpIm1LKY9Pds|vz7sndIA623Igt!`g6Yk-7o&q7oa6aYX?n70FkzYA z)SORP)@&X7gEwkb-SMg6=^h7pL%8-O!OTj#>Y+lP`ptKC$f7rq}RUKj?O*+9|yjzSWf zq`&x73e2LjXh8>AIBiYKIBYs2#tQ-17}$TZKv7|RM^wl{k#rd7p{mA~CR19LEPxP} zmaFTuHt}m>^UgJ`MusFPrAn!a5AEsCZSV6&3xk7^vYLwyt{jXq2B`jc`Z7fq+IM1G 
zgT`Re>Nz881ZM~~!QMSxoQ`IK`jO*L>^S(t;#AwzpA(nOJZM=>J-y>eYF^N@jaxkYN}-As#*L(&f=B|5>aU)s!Bu(Q3HQd zOSOpZdK(8Eg){L&aKKhXh6JVk@Pu4kCxuQKsnF0C=aAzfR!&D*_?RKHbTMPO<51Xg_0+%CdiGK{`Bm@6g60MW5~J&YYsXoNr!uLvsBf6YpI7 z03TB%Fcap(z9rQe21Wz>1Eg7wG|e(j=(~WnNC}x`jb)aBH9tjMhdD9R)yn;3*i8V+ zC)FPK90q(P)r^fBLKwa@ACql?Y=@H&^;Gxy3!($0Np33DS&^9zt-gM&9?yTkhq;-{#nz&b+$tF3+Bt3r@Wm3E-O=y8J zHuN`U4#K$3YVirQxnV_P=7!4rh6_t(7e^OAKh%Cre-jrU8y*Z!d}_#3o#}zhvYCGQ zkt+KRtsAu`dh43|>jj_!ia-Ov+{bG`>Tu=hc_epdHj397@FQ1w=6j~G-B?A8dn=(Rh<#KB}V9Z8sW1nMks#c4+%A_jXk5s zMh*b_+2DK6^o772PMlPX$ctQ)vaHI95g@Yif6~$yJ|pT2aOsOUB`QWf_R<&mTF1ff zUBCaEpKQ$>{O&=pZF}6&vv(AjET1F0cQn7Z-;2NT$sgP`+;@M=%=KVf0NbNO$981W zTaS$tj&0ARw;YpaGVv{x-Mb6fngcC(STD1<`e=bXef2VZ^>9`gA%E=fyQ7u-_w94#UR@ZPP8Y2EFiEj}~6~kCuL6fbi zr6aJMZlT_ej7xv(3%Xc}#K|3IH?LBu7-M6xZQ=Wulj?YDe?(6*YPCuSSG30#=ZRke zOong{@rg|gXNO0JZykPOSkWs(&?{rqEAyxqCfo%1J11{7aErf^^8}N@Bi4#A=u-Wpn6p1MoEr`pdFUumpeG_-dQ_9ey>xjX%JXZG0PV z?)puyLlG^Q&r3`L=*hVQZk|K5i`A58CK5F^DHm#Md{yMt?}!-B@m`+PNj*lpej}nj z73TA0Cc>4fa+)AkBOU5vuUP#uwdL5VriqnJW;LbM(3)6weS3X>(h$_UE4>hm`6UXY!9IPrb+ss0s$r)*>^ZM(%)?$)075fzXv46t;BCSie- zN!NeZrctk$w;`6x%k+`*tx51*B~7H*M}RorQMq0@zsZG3@*J$)A zWoY=|DneJA*J&Pz<_b=)`wHyn_-Jl*Y+rvFRw}LeaNqv1CCMc9q?}AbD|UY(_EF!% zIqfR;&M=U&Jzsi>@#`A12O*vD|*XrpCdD(AXIlG%UF zWmoGhn5YA2w+&i?P9vjGDX2{@Kc`!Y^-JX<{+E_xw73W{BLAdF5Mlf=48v$5M0^=O z`j3vk0EV22$(C4eo{PKFWV}*DNp1#=wkCNc>|E@m4Hr4o}J7jpv!$Wk*din%h1`RZm@qmmnSr2 zzGZ_B0tsL=$S~5ImkiZQJJCLhIp#xo3X0Ms8y8uSSxGy<8Gx(`5+{0ga|(6k7OQ<> z?OZz$YkO!E3pwV+P~Jo#X`0Hmqbkd+nhfi8&h{MGb+rEYLpc7ItdOKV#-&;JN*aG5MFUODp62+YR7 znph|YOCsOYb7UoL^Z+N@psIv!ht{p?+I{y9!e4fz7yf;8>x$5Zb;OOe*#iIK2jmfe zVgs&~@)Ci?3poo6#XSuC40&+3wByFjG8!g%Ype$7XltlEt+apmUak#3>I9-Z9*=N% zodWmQfdk!sJm|-La5wA2gFf7gj(YK+7mw)iPx)})+~SKl6>rM>yuiZ--_GY$z%2XV zngaX=<>iC@v!J1`Zpf#x4{3_!vJ8Yh7MnzoTuk~Ohw+>wok{po62~m&!#M@IkOo|+ z(uY*P5d+grcBX&n$^(uyS;}&yikl#h-~>sOUZAx0I=8#drlU%~q!jQ;JBuqIsHhS} zen_YVpTpwjY4R(SO3l)jz71=2C`P9yCs>1u1iUA}hibvjvc#LPE`?w;P%cp*mti26 
z1ojO{F3lk5;NjX6}fz)Qk;2guAo^Fj_VIeCzm3<+;eGM$;s9MU1d^1HWGvgKX~dD8}$RdsKv z>fZnU|M!2dw*^5PeQ26SX*W!hc0*evU7M)t^U<86k5eogkTN+VT5^>j6BFnJ9JbQP zMQu5{viXVtA5%HN=c?g76~R3n8b0K5`pmTIr~jm)&0f1Bz@b`HEMC%}e8}zanJCqT zAFC+NWpf0Kn6~&ywSm_WIE;kZS^O#tDJ_G66S}}CZ@Or zQg!hJek&L~e<@Gudx#B;L<&-*4Xv`FqHJ~&(V!u1z(QJ7f@)opgjyTnuv$@9pOxbK zr~%b5GN71yvVf$bC1Qd^1%oFl8!1K{I20bdG!hMb5Jm(im%_Su8Kw&+HCBHUSz9N$ zIgEe2Rm(bnG9$~Wu)+NvuGf24WnO(Vej!Y z0XZVHv36pwVL8Hv)Bu$GfGp3{<|eo`65{hA$$4~B&cOsuy4^POF?S42#Bwn#7Ncw@ z!?O*KP>1k2#aAgOwsUgqY^AwB;0~NDfX;t0^RWUFV`Kjw!*w|5AGVz=glDOSM+)*i z6qltEnX{bV;k{;5&a$df8I*(NY)+39m}sgVNV~!Vg^sm)SA8(hQsdO8{ambI@2DaMeKT`ne*9slT&o0SH+L$1h)_h`9ptz zD&Sa&gcV63qe>1{U>?0M$57_S9Nx#7S^Ue@;jgI=Rn~%$6Q**4!~9sm!FV4pFtgS8 zmkZVE@Yf2UVP&e6oHe%eUKPqVS$a8{3NR6}fMa!$Av8;*f;ZR5T0H!MFkAOkUB!%NN)eW&iRR@-LimM(FsrNk zYC%^yt)-wgrE=Eiazulv{DRfP2@DyZYRPZTNbdHn+1k;tnvqN22G3-}5J#_1|$7|!mdt18ahNHnC8Y46+9Pmn_xF!~JCp&!p?vy8*f>hrF-uWP) z`T!C`Zk3~|Vk)?in~mda!FhB;&VtzZ69$7ecFYakykgaH_pJ7$c&MCzHM>+~%(#@T zgx0*{XC{ZBx|TOBG8PEm+V%9PD>m=**!ZH z33qQvbKL`$aMB6Az!CHkwiD2D3o?joF4fHVAu@t?B4Nacjv`T{3tfM6zBUSe0=8DT zWr!MC^)k8vu_0^G^|^3Bbs{qz9yyUSuQp@m-eXKmji=qP+=V-LEIsSqfNnUJbE9q; zPNt!I-8UfF=)}d@(%1o5d-1)Aiy0*#>YevtD3qMc#{N={iOkhzR3{4H3(O-Y3W%8n zU!l|W9xKS-@(cM}(z$EB$ZFtPQ7-(Xz;YHPq`Y6ls@QwBsQ3|`}(E_dUv%ut3!6TPZF%&whi_6xcMAS zf4`~0R%O#SCGAn0K9-8#?Gpz&qfI_W^}nqG9~EQ0b&_63Sy+Fgh>@5%*ys&+BppI9 zNA9Y!NcicyGOpB*Z5Y4V)unN+el_zqro{bSqm(@VOb$EoZP{Z(xT>zRa&q zg^h3q?Ffx-%rS_a3tJ4QqO;xumN{as>?+JuCO?1{QNyXiQk$}WFtaj#I{4ZCLF}-a zwV8N{B^nxiy;Xk%TUx_p=}5%J5=|p@0`@DgfK_l6qm8{i@tWev@@L#mz=(J7tN0I* zu)l3h2CU#I>qL%=Iihzpg@TG<)-y?(x&j{;qG(z${)Ctm`u zBfSObg1_R~8!DW39}|pZEVs(sddDl^-XuBw}LnWnWbMml^OMoO(+;nD_SCjQdn!WrxeWO`kyp zmw$#sS@M5031vqU{u}UOJbb}q%H0tr8HqRB7<6_fL zt?AmNa`|UW)PaBb`dCBU9<m z;g>pQMr%TY2YNcDN2;p^4)le4Qcioar#jM|bZk-P7JXYrXx(z8s#ii7D57){^=fF= z%b1-r=DB5m%z~65Q<|VsLO%L702+jbX3QJ|IG9j58)78r;|(4` z4J3c#PdL$_CP;!7!;a#2Rt|fQD{RrIG^D`--Xf2^hF%13kwxw)t%VG__C;M52#u37CWx`6~SaO2S#MS*BX#jsP%O6!M5EvMdi~j7Ul)E|R(vdi!R#A>{ 
zI#AOVY3s{Yxicem&iar<6W|I#3jVmOE*j1DXCwHH(R__aGmOq+)&nhsH8S3i-D9zY zbE!~ERM6`56u83a2tBK>lAJzK2*~;RJ}_R-6Hg*_$a;C!5uYmrXPxC4+BF?|%u0Vh z#!pATtS(XM1vwG))!)DNu8cR$u4z8>Si#EYc>1vdKdp{_S%I6fAvv0@z?+=H=T?}J zg=L$a%)`;=NlI{fn8~$6G)?PPkqQpGAAF+kcSocksUissX9bbgkgD-5Xi#+6#oGuC zp;Ccgw~2O#So~dW9iyU+GEKjR!TNurF(oft>nj0H2arq#&ZotzW-K%A7na~=Ru)WP zC6=;Bl?FQZ3l%^!Tb4`YwM-(y3X2GK*y=M{^h9E3ee+1IKmr+Mva)1D#?xB``7IAP z(Gsw%i$UIj4nocgQ$>q^-_sYbK?~*2;GJC9n*dRnkdTt!WAwP-Cc(T%;{ku%cq~V& z-5!r#n$gc8GbK$KwZaWJ^jkbt>NHb#T1UcxDp z*KYTjDg0%E(6V-uU^kNZaSXo))37$e1PgdE`l3OjQ^VjS4f-ECPNxBL2H$2Y&!bPO zH6#w+BMeZ(I=PbCT!}Y&(X)Sui1>k~qm7B+2eHsPxJ9x+$7yOFJrA&i+ecT;_-Mj0 z!zoflN%eWH;suv5o}B4Z*}lRMfC}YwL#0MYvVji;l~=YX1$NbHY&C(hZb@|V8tkUq zaIMiUNN$#V?kN&w+>+J7Y4AgHK;5;OB^QH{Phh{*>VQ%NNb!v~z!HCz!?RFbQKJVZ zNj35PAE=kKX&DM{sKe(>!{APDcs$8MBs| z(O!q&PI8Nd zMsf~ObTTM`F?w31(Nlk?Fo7}YRG3;v7q7?A2h|iH2byQ0o`c}$Sf z_ug^Yy?0#W-UHQ{#z-{N7%jd?1RA1M4UOR5uOb*qE&c(0pV)r_kW&>BkSo;?0oy9J z8;tS?Z^05IL?9=Bh7j(9w<-lO9$bpMx~vNG_tmfpvfZfWP>tDVxB1K(gVq{$xvE61 zR;+Tl!d5MMeL3a`e@SQ55hPe7f6jQKHk!6YJ)RmXO6?-JPv+gi#nDf2%!9Z>UM-gnifM2N7zbH)-a&!Nb%?B z9P$gG!#QLQMO*9$`wonkJwxZf-y-{tbU`sItCjK+4g7!7qs#$G&fQDsc`MIZC{%Tn z7X6adZ=oM@C1W+#w;IUD57XL3!5kCOqWRW>NOCWLCmjV6|<8kn9 z7UheSs?6%|sPra>+3GQ>NNhr-=bdJ&+o&RbuV*wWwVv0Lx9Axy_#Ts-!{J&?VrNwE z{uTFs1*d<({Y5vY!qI;R3&jq;i^GX$VY(v;CGoJF_y}en-oFbyi^Z^31n{_0j)<*H( z7>y!BS}2ppB6>_(w0QT5bBQ2+SDEP1|5g60SiABZEAx{q_69-8m9o|&g;Ks?Z#t70 z9a>*Al<7o7BZ-WT^o)?kp;!anRv*?mMv&~Qb#--?%!N9r^L+5Z1@^rUtnY~LT!3Y| zOVxiMxKf{7MJG||CBbf2$+fR2h5{&a`Lt|14TkWm)QjNxbXqPKfgw1lAsz(O3nwE3br!niIt#*SW*lG%;S{m;$~p_e*#a{TgUy7~1#tay*I7ul-173_ zFP*=k0UmFkzoFr@fAFR?tLBEo{=pm9tU5Ft#`pt2dVFm3ksn=OfXCxM`r?MiW^#Wm z`;Twf@aQZ!J_7OT;-4^56$Uc1MP7#%dLEO`Ae@e#NAqXwn*lS=W*08V)i5B#(dF#$ z%GrW^2?Dq!)>E1`HA$9?Fbx4kTf|7p?6wGQGyNytVjy5TD;iPhIME1;d|0&**r;g= z-*kdj>762H<#gl^F#0-CAj`UkC@zrE|mL_u+!-R=q19~9cY09 z-Ii-GDaixitu9M4CF{%e6x4E}{JjLlXcsbi!JtuT89i$58LQ{T+JXAE9sPd^nxp|@ zAv9d`hU@a{?%W)gI_Eb27OT}T6wzz68MSK8DVp6@Ui+Jt$<!rt?U 
zfp6to#m0G5%uxgDVvAlcF))8F&ZE{-#8fF@Eq*b>mcpcDWv%4K1~5>TgL3Zyxx2ow z&Kp}S$TxtM6s&-Brz8)OK^WYbBj20*=3_X+I+ZU2jt5eO}`s*lC3-F;zV{U(5cOW}<&sdGS zZF5uh;F|d1k^A?w2rd_`H#-I1%8;I*+uOFjKD{yPRkMR zvYyQ`8_@qOFR~NVq-uXcF6U8K2XzQYjxbsPuB!8;hAeD(uAnrPt9i(6%avx9%$=zv zYE*A*AL>!n@y0h{(F4)Ls~6^Jqn-q^K#Ts7FnMZRzFMc@O-3m0!HPBLyFK2( zdqC_H1W+yB0|aIbq%Mag|15Rz+q0v$ zPGo}|A0NK?>4Txc_Bf|ONu8DoW`^pw+_gT6OZlOViG9a6hh7jH$MS)}o~-1_ZOye$ zwmH#nto!;+ec}GXec#%$=Gniwe|L-4VB{QN}w zbV!on&le!uP(wFzKgHXPp#W*LL+{FVFrz;qJV1Yrr;x5v#l`=D#4sz|1P8O`C@14{ zn!pd0ei`co0zQXz0(G*!nTRe}H+3v5O>RcX7++Z>g8>b#(S?F_6Vb6yF|nD;#C<~y zUoz^HOHc-tG+s+O4U15gv(wV}Pg<{gdf)aZW?QRP9q4YE$a!jaetFlnhbCg4_ASjl zGXsC&pUv&tKWEEqXx(`*>g(Fwm7UDG?mqg(yU@XPcZ}6kjodWcx@-M_*VVOpJl%2Q zSbcotKwEn2+FqBhf8ADWYVFjvb)k;djI;j8!jrLqT${(;n(wQg+P6=xLxJG)K=LGz zJhD?>0ghU@0$|EnGgtxVLb;;Xt4_lOPz`@Yjro+ik}D`aXPN%JxgroKYRD1*{a25x zMVbE9QT;IQuri_SGFvV_Z>n8$%d<08t2=9XN=524+Hl*N)YJoGHJH>sxIS^< z#8@aPa74~(q;+Kt^~-~8~?BX#PH5!^HfJb)-!tnCH?4fOxPu3!Y&yTT1uGEa#c)d zDPuy*Uxf+rAJrC~JAC^yJE{@~o;!TxnVnTHh^>1^`uDUu#1{GJ#Eb<66CQXSV#2>a zyz#N=Y}39+HY!K3V!sQP!V{`0AlH9JN-MSslX48_0N~-P zu8b$0tq#&z>Y@ID7ua0 zZ!`ui(Fx;(VFgR0#A&hL${^Chi5yM7QU;Nq-m2v!$cuo^hJOvrN)xiJj3j^epNspz z50aY4Zazy3ak$k-=1gRqO;un>Wl2{6NYF3>TfSHTP07>>=%_5|tKi1q6}3s0VdK{b zz^N8e2B4?l6r)4M{;^tiYJ2PUfjT`wYY9roj?HB^9o3Y&O$}AOX_prOMaCv@ z7NgGK_uH0%qB=RUAMye>kbwWJAjK`qNYSDoMf78U6e-WYM*wFsvPlWzg6PwKS8l=h+w&!7X;I zPOB@wr@w7$)}>ns8VM{lbswOG{Xi~!A7I8L(u=%a8l(QjS8^5xTQ%8_Mz7DJyRv9U z7ENYRe-_QoV;z4vlg@6}eIbSJOQ8cPv?+x~Qz+n89p`2c6q@Q_wp?k`j(<51Zi6Ir zsBV7oPdN&F&^0Y45=5{BKs&DhpwWqUqS1+oU&^^}G8UEpf;*Z-fk;j?Efzp`xCvGX zl6HO$NJ@SlNcw#saA!wu9d8Y=#@O(UPag~n=Bf=qX+wY2w3ZI0SJhA4wXO=6+E=Zw z-TUyS;CC%)080D204U9F$>z4SInbxpJ#q6&P|6r-J;NJhSjrg&k36Hm(t9VHw&wj1 zmfrr|y)}uEonX+mgEjfQTwihpDm4RCy88@bCX5i3idRLYqESYr;%B1LC37PFybMZX z1Y_|_-XMR6k?3z09^rWaN&n*-kTmQM%77F{Mgg_FB;(Nrqz6?ZLenrvnL`KDFj^Vz zF_4;9&ZNp`lI1hW@)>!XIc|B|xjwk<4J5igG*NDoD2G2+o{=kZB<2D7wVJ~6mIh%t zxLWxeZsic3$Hbfjv9S~Gn_-fFL6qDmw`qhCw`P9_x~fs`sGNiy8(+dY5M=S4GmvbT 
z$@rBsCgsQ~$5Q_CD{{50JdAFo;63sQaMOJGq4VX3&MOa{hl_`!V2me~Y9(p_MDJPw z(Uq9(cS=FR6`6VRfvw2AhwA}}D*s>^+*ZO`BLn%azZS?Z738@B$-mSx`}F=De{--Y zJaB)ot7RM@`Ry{2uZG2SdZq_LKXo*&Nfo96mT%cvhHNP>>KccoHmjJctC$0sf6?u%zGKuL*5;cyXXq;9K-;i(@JDt z!MU#r?knRCm90D~A@9?s#Kw_XDHrzj}W| zH&CnYd$Jqg`$T+rN3vyGM}QXOUG{c^wfX+#2){u__zg0`H*DWcu6uU+z*B`()6BCo@c7+u&$gz{ogLm#&$ecG#Kha*dU&w?&Nm*o{jG-vb9cUZ z;?UP8!%h1h9S6s%=6#RCwCKe@q3?gILWoDH77DGt{27mI^=&_6FIBQ!cpb7mIE$xs z(CFKKwqRFkSVmvJhRs)T^qSH4DU(~YxXhYgv1T2Ct0>JUG;gsPMHiv3b7ul~< zvTm<#?oH;jg;Eb_>`k3ks_oY)Iki_BdmBF6*eigC)aW$}_dwSHK^gTw>D*Xvveb0M z>eptYS{Ry+sWe=AXezb-w$Uo7{m|xbqwgEJZcZ|4Nq`y6POC-#cIWiipvT)>E!w;i ztm33MaeCJ2Fjo)k$Ru~n-MfF`n=p!b6QH+%jN9)hC8%Aqb zi^S-7o5^hjFieW)!;!9-l{8pQMyr945?xj9hAL6=N`%46GiFxLIPE51$F{b*bsaSn zPDJvXz^V=|eu^Je`2j#pC|2EshnYtg&xQyBiO-|Yh#8(`mclKL>8aGijSlF&}NVKmu1)TFA3Hm8ttORfQ`@wN?vyhH>z1HwtC?grQKEk z4Hp3t%9r*_fDv9#FJbnk5@v6lN7ZHUl-&qgt&r^ zmihfvkb3Y+^DbPIoRWPbcFlcdePVq_jh-E+Y4bB`Kiv`P|0d;0j>9r*8X0<9)MhntUu5?cV#)R%d%t zl+|D)rJ+2v`N+_8j}J39bo7TdesMJP?9^^YYje`4mh zT|v~X5K(i0gh?X$u60+>yUOIsWzDW)<~5aDTcht5thZ7{PcF>s(ifC-KHzQCp$aWF)|Moe$=-d(^&3+p5&4QVS!8>InRxiNE2tFv9=b_M=;89dDC zINPNHw~Gk;@hZqrbxmYA92q~cuj}C2)*x54`NaM`$HuCjlhPyU?CQADmKtfuj>LaB z%y`Q`K0FLm=$ju|f9pRyJluc(Ki)U@jqS0v!pRNbSkqcK30CrhV4+k4bQnX1%l2R6 zc_+sqBFzKcH+Rmrvx@sz!CNpk;8j`Z!SDb+=YuVBQ~|UhxsR6+Rz;Y=lJ&R#n%3WH zLQ9hiFA=ncP-$u6|9r@qbt=fiPz`?p^n0V|G?G75gRAXuw;MGBcfDcN*dM;6MH!cc z6BwF!3RmGUuwVV@91wPki^xv!R!J}yB>rP9+`DBPz*8}Rr%3#5Eu`lrz`qJt`g75; zmF`=zSC5=0Y)aYBzh}pbXI@6JaMPt zcD=bcj}^a-Zbl;>&)enV;cpX+T{JmFL5FRlxsoVc*z3jy7T$$6U?B5rDrJ5%kol2h zMBa0uu_W`GWSJjp`_StpVmri-|B}p)HI-z(x9!7%+=UQ7{@gPEYCc_A=qvWQn&tyv znQGbE9id^0oC*kiZ_DQPrip*N-#D8tL9s8wa+_MY23v7l>I`prcxwV(yZ-3r zxWQ!7>Nv^72zs>%h_mEKW4t?IRjF{njm`tH4*&sYGir;!!?5V;8AuOP%G!lee_ZBo zQ6Tm4v(Op`q&|{k5MAT1V=*H5F_8Elm4(6;(w>hs{u)Sof)9QyOZtD0mZibxu%DIQ zU#*k?TUzTiTOOM1pBZiRFl?w}&%;xJu7;qFBv3#cYPCDnQ$2LB$BjqsTEF?0LHD;z zHT~(Xxri-2l4=;L70|V?*px53dG?OJj{EMoZe6`b$Iz6X*YOfV(t0|x?MSaR5?^;{ 
zb*wLG4_UX}HR=zvtb%`1ehXM7?JK>xImMgXrg(ExXQZo2d8s7jrO%Y|72e!-@G?Yk zj?e=!?+vje`mKfE!=kmE0eei(QA>jU?`WM`5%d&qGnPa>j*Ni$TLtFNhtwkNWl0}F z7ZD@khx;Gc{~vK*0wzasrJ0dgSx0779(88j_uX|@cURxl_a%R|TIyEozE7k=LP7|{ zA%P9};PL_Z{65=j8!*U{B_U`oBTSF^3?thZ8L&P6fE^Em=d->6%$iw@N*fWCRbAZ@ z0(QN-4XVtHjI4@$@m|Cq@4a|W=XU7ff9fpCD@YLSBvlmE+4kmCETE*qzqM@bdYSr| znZ8=v68>$;vXy`8dbvc+o9UGoo#>U9w&^bX`O_1*De&AsWxgB^CMrF%g`+oqvd zsBd-S)GcX8d*2v%t83xHqAQn*TS6PlCVxvdZL_4ZZN7ie#pCi^)hYA)R7LxfLzy8s zq62X|7w-8tpHEeIvtg_BrmCLV-#F7EbGonR->?a{@MfuDt4eyaI)Q(3)8@wQWl z_V2uVV|2JJBxpz_3zyx^qnX8fhy1{p?;44%JG9W7Saauwkt-&fecS_fNUy?J#$-s1 z76x{cEj@p?eP-WOR>KK8tJ7%ZA(!Y{H{95dKx}gI%7NjVupI2ZIkO4Q+`;y-NMbnW z@K=5MXfJ|=#7KpAOk@hOiJMFvCm7o_V2<+>0eCn@Ps#K5%o`Q|79QPwxWrE&&kyE! ziJtmM{f_BEx4y1}Pu+0+$^C_ngHIm10UnRX7VUog^Jz7TfxB5s>$2#vJ0DzM0Tf88>dgl!+eutk)$OdB)$QE) zC@X&!S}N+DBv$WCAoWh9lFqxP-kEQYeYElrQt~8aCC>y_@3Xq_vqa!%LYhKdF_LtypRQiq$?7$V$uB822Bo`8m+C=b=5_S1fB0 z)ikB!*!=jefvQ8_fvS~ucg>-1!$?jiYkmxk%XW9K`ogS3AK0<@i`&|aE)S}rX3eYb zdD`ery8Ib8_q@S?y!uvw`@u~Sclm#{YH)YpRSHgv!R@e%D@AT2_HYv?FmZA%MW~i=l6HF2o?*gH8_ldRZxFh9Fnhh z$4KYe4z~h3`te?mMvrZ__mU(4jT=7?*9x+yAE~5H5fz`d52%BXZgnVVsf>ae5ekB9 zpu`E=Vs`08P!PHYQ4qTM5AkVJ%;=qVEBE(bY6Y|r0vQlB28SUOLMBAmcG;&5w*6-W zfql0ALQTVw1^H|(#d`s;RsMgyhT^+u?w){Mr8%aW+rv2H95%zhSMn9##gqI5GUI5D zmsA())c=P*TIAyshrfIKwsux9>76!{T~J~lt@~FC4&i((KPQPe3TGG~7pC3N-#nLrEt zN?1XUY;N_)v5P%&=qY%7>k)VipjZoJqNrg!utSOj#USgZ2IZb$7BM7&qayJ*tT8r` zp2Ajrf+3hhgS~=5f^`@KMexD)OBA?Ac%u^jNSF;Y8n26UYp;To;KzX!*jb7?jG~~P zOI`kxo36coYdl^0$&G*4!Q)Rjd%S&Ndg+c1v#Wc{P+@6DMADF7yytJnHZ1$m``^9$ zeSAE;;hX!HHe1JUf3o!D7jA3~^se1|3sLnqgIFaAw73@n0T&23LBIk0b`Y?EfEA#i zJr)qfy`x8QjM6w?0qT%6AQ171s2s^LDw8}aNBfG(B#+90lA?dp5D9TE2|XmS=#v#? 
z+cP|3TeK+Kp6kj_p|8LJt~Bt8?}|V~s*tV|{v}ai8mOx91ZhL$pi`&gGx1zCcYMWw z4A#sB8kSWhT_;QME0pLvW|7LplqLUgS2^m{dzgx=oPYqDLIF_1yT9LsT;*g>IgHYt z$9@0dn2|J?u}$CX!Eva_L=DbWdZguX@({sRNXVkB5-g91 z{z+jTGR@0KuWE{4gDClbAN0+0$@}}V%Ubk~#_>E(7KSt}B(0X(R_tu6fWBLIpC)sc z27M8qTJJLPW`Q#W{1OIz*WGn=F9v;BFLRdq3POEA&Pji_5)#^h2)z{1;*s$wz$gwch9kF~srg=DY>L9E>sx01 zE>H-EHo$*UpBAdl#&i4s;!{W})rQukyNmTcg<$WByS6r)oNf*~6*@dYh*p4*Yod3`!tq0USg=jKgj^$ymDuIy{zEJ1;M$H+f#1*uSA)x& zaS4B45l!XqRlxQY-cb4NYUo7(p@&N{Ae%m=$5|*L;{NdGe)7-fS}Wd>XQ_K3YV0IF ztc`Qk&&*vl)3bu~4j2l2)8J^J9{WgWu2%a zoTww5Sh$@?xSfxaIf8)SBBKVBZ;2=xrcr-je3YjIyYWDT-q8A<2W4@j8|DG~H5J?! z2g1v;bk3|fQxVikH=ur~d1f{g#geb}J(RzZrSP|10QhmR@{hy(AMc;p#D5z9Oz_?9 z|7~}xXq7mvXw!?xLDOmT^lhs-XwHIt*+CQHW!Xa$;^pTt*53pr?I_0j?^ecS5srVc zjOYamM4^Ha-jWAF3^znMSS39E7v}hVI2fidU42SGPYLoLJZL)h|wtj zdz1pP5C}1#4-HlxM3O!@CF-iPq4U}dXvqv`F3V8y41m)jT(c5PeFJbNQL}cOjcwbu zH}-C9+qT}=+StyreSup?BPB~3zs_faC(`f?jtZZ8+hpNEVUQ`EOH15Ie zkOXhafndP!`pMxl8;LR#L ziv;ldo3Bi11{uc>F6Ctu3#j^#3owXKV#J})CvVy1rEM$16Kn!YEj>&ieg*SP@Iy$k zI7sl*Fi-drpf@~7P^~@4e4$Ze9PKMv+UNZz$u}9VKkVw_Buzx$hLh@03Q==iaBw{e zxguJF$#e!0i{*+x80D2Bv;#=#{tEO^oKATGZ~R_G?Nrr8g=Eq}om5;;0Q4X+wHaES zy6)$X7`f=6f0Z*VF2`TS;ar?{do(ju1DqZ=`Lu9bHXG~?1^Bcwn>QOwjofS4_#3z8^jRj2OT-bZ3s&73k*=!P{8PV$#nwnyAv(u}lg8Fm%v0PHd{WpPai;_XYW z?XIJy{d~Z*l%DZZC5y=d1J3Y^uG2cB#3U=Oj2M|M(zHvjiimskA=Iz0;`R6DRs|v# z(P~Z5ZHLM;^Hh)?nlq?SaPSZy@Mqpn$0f^~<${5>tPOqLFMdrN+`}L}1`^ekW3mc6)NV-YBS+yy2KArzT8~lwRT{UJ zl~ted`#+C9bt~}P6{)r8GZ>I4qyYZ4F2*;?z+}~$gR4mnly6Qu9deB#2{x|aZ(f6soM zsH3XOPEXX2bmm!vs{I@3J`tOzD;`*Ee2^269hlFxvyaLmh|H3c)8W~JVjY5Z&hM0;#+%rY33tTEs{Vi$Rl;2XY+a9|b3MH+W{ogAyuuyZVbmOZ z=qnE-x;a~Q;m|i?FJ^@}dl6CwblB2l^r#Et`k=}DN}yI_KN;?uoZq2BVoMk(`rA8O z5Mtm!d)snP3?RoW^eq_xzx=(dXgrzY*?h0@B?Y5~?pq?W2ipF58kZ;Onx^2>d71TL zW>CZqU~j9iSZ=?Is>W_AG9s+Pjq$XyCcdPnACm?))Y}^9c3d(}B_dH^w!;S;8MMw0 zQ)MnANMvyN`tf&}_Rn1rS1nmhQqC3?E?TM9dTS1yz@w|Xyx@L9jjr_6j~XE!5u2Oj z!&io7?N$$T_(UTEJ1}~x?)~nVNvuT zzE8=sBVYb20k^>c6QYFZ1NYN0v8kM=1b#0>Pyt!ELpXzs#lv`WihuO%P;Ebfe;dvN 
zK(pZ9edsI%c0}3=_W{MqT`^1au0lKD9Q7D2H9MtvR_e&nJ(ZD~}GA zdz{Uur)EbY4||W|XO~;5YV4I$t$T0gIFQ`jF!(&}Ia{Z#ez=PT#t0gE>;@Xv-aE;f zk@L0E&`Kyn+V@)&j;EZ)x!V2^h@k(%z6+Z{#CD4%%SyzlWA(5nj~oizyrLm-9^9L z>Bp?K^6yY-!Gr7;OP=G?F z1m|<$PG1mR{irEdG?IP~>vrSW+HPOHT1g?Z;)(X6g;SIK&((>nZxEl957+_i5ZWzX zEfeOQw;9LqZcD^9TGk(Sz8Za8j5x1CbO$tKL3h~if<+1eY%r8`zoV#k zC?f)Mzn0VfH5!R)Oi{amqJk(1lT|JvYED9(u8PU2)h(q!<_Qd86{VE4!sIC_dppcdW^-zhz9UHy2Rh_K|K#!S3Jj2jcESkq+w zUa(cY6%yeX-HsaX4)IbQ6~UygxExVnXJKei%E8QzZc(hzIlGN15ZqkHLR%#K)~n}^ zE=J^V-#L5v*BhxV2KW*`p@)r86x`sw^R4=R(AkA}qwO`1l(O-!?pJ2fJ}(AIRkQXE z@}4t^mN8${CPTJU6M3>kLb$gM3E&KSg{#I_KImP zc&0*2tOAZc36bF?8bUGY79?~W=^{26MhZ`Su-q!AUR0BV(gm$rzSw8_ycZG^CN-G z##91lQ-9>xkHyycLO2Hm{bQwYMh2?45(~RlG`|l!hVi-q;Qw`J|AYU$TNHO>8YLvs z{?1qo3ZECp;s?9M<$Qbtxk!}Bk>8IV$PpsR0>&hlue*j_1lma!(J$xumkB z%q*V8eNfZwS;DN9h;v@LCn77Gm?RV3PNP^GMWV!*9u?N$?_m?K_eA<+g(q1pj@amF<2IB(hk%#?!Y!W( zm<)QxG~L%~FMi-w^y&m~%J0XVzi^<7KDh`S;Z7S7;w+@X%j}?qLS_l+>AOs=;B^~O z%xVacF9d>BdsNh3oq_H*Aa=!qj>eV}y}BatNeh zuBl-zN$%j575*}IYdSM3r~}(t_D-p*G#&Sdc2}=m9pZZ!!OH0+| zrJianLeukaAHLPC3`-j>sLMd=7bn4g1@Bh)t5U!BcclPJhOt=2u`cxFH&+gGr{^+! 
z#U2$xLPsu4ps)b?Gk2=W=us;sA$x~an=G~Zj>m%WzCn`IcalN=cq5jP!(@&y4nYJ& zfRLO;lNi~aQ`9m7=&3IvbWGZ30CEmBVcPV;qsGLnmp+UdQZW&A$WGA>(1#Ej>ld^$ zhZ~j{+H-4IwdU2xFuqo+*^Jaz64fjLTYOFk0s$INRE@W<`gJ?v@MR$1RqbFFzjaKj zPM5*u$fUNGYNZ;L6CqtcYkWh$S7C!AfI6j;86bNvHAd~V$LYOLhW00=`lp_#U4C|v zjA5`S@Yp~)v=hcBrMV9^QD^R#Uu@$puUQ*8XfCQlgg7Hh0@ojcMwApOCuz(jmYMiN z47E0L1XfeY~1F!z8IMh7r)I+=?)uQ*k-pSXNk@QfcMbfC+~$ z6^=~aty^0KGPqC>%yP$gh4{#Qa&gyu5mgRIR}RFQxT9#q!qq7yt246dmdZZbG5ZtaNVNYzLd5%HT~$ZG#Su}BNb_Hbb%l)7jQNgFaZG*GQ1P5^A-+R552= zso=5eNdjvD^6!P8mgdtiBK(fhgj?jqw z=pOQ6#7l{TdbRiyxAK3hJU}H`Uie zbj+HL-v;5E(~u8#*U;2{99wk1Y6D_&_X_WUxJh3lk$YXZ=3dBlwnYynl&sk(GQK&3-Drw;zQ0|R-4u2-2`Z+dl z!(yzk4;NCA?tjx=VV1=b=mQbhrm^hNzYoZZtud}dRcy~ML@B`5>SLj(Ez<)iTo7K## z`z5nsbKhyU%)+d9CS;L(OZ6bEf3$uemWJJ8#n8;@==OkemU@t7THwmCh(pOA4u;noifiIpD#)+j2~d@?3HX6Z zv&w!A*(y;0L}{l6e<=8}fGG0@S(A~`svh$1AsAfL8QQ}L<3=rMB`ZbAFU4ZT_@Y2X zzlIOG@EnsC=_VkrSN(B?9Lll;Up{G|9UtkR1*>6$T=dt<h#8;Iebw$TW|>g8O@A=M4@WN&5;}v@=UAB{FVbl_ul> zaJV%wY-L(&tPe;?E2`!?glbRH|337tkaU`S=wE-M*@_oznpq!eekH2q7FEsam+(l? z7-2kGbKb>K|BhT+X5HFoZ$E)^tFdxwT1RAqhlE6`mB~>)0}E+J#9C9{uD=`~M~mV$ z4ot`~dDIFSG>nL`vHq8+Lf>h%%#Gs$)N`{+ykZp;aowM#jCa>tT`s#f!aAM9J*dtYo{(#WIgd*?ola-|LM(Im3#^Bh8+%Gc%!-hxtf{sDMh` zz8D6>H_{bIjx|k!EV5@dPtkILaAylt#%0kn$(?M5Nj%r2hL5^Q6j)=^^=Pjtq4U-v zULEAOOt7Hf3h}gvcP#^eG99B{3oe}1>s!Q3)!;;Cdp4NxYauaEER_Onuw3^ijsq;o zv5N9hyis+E#DoUiF(zJOWVr*c%^e5Vg#l+&3CiH0@GGNkJy(s7=<(>PyViQ3R@LTM+WS6zm5XNyy?!4%Y%A*V?GlBR*0q%`>LF`!rtxP z%%)xQIKR_|F@v21yY_UB|9&~V3R!QymM@m>h=lwZGWeyGEW{3Zf1_)nBF>KDn@q1s zm7puehQIJzj6jz(W0YL7-Z~T+RL1TQMIUP$(^9d~Wy(Y0S{1E*bX9}o#h+{~y@B<~ zfpUxB%(GJU)s>LR+U%s|Ow6juVfs zU@_^{Yq?A#i5OkmlQ|1pod`$c?8llR2&B;K!k>aS*tRHQ4!d2Xl=FwuzQB_k~YEB8R(o0id` z6@e*pS3*sG!#SoW17ciF@UUxTtMERuQJCuv< z%mIlbR+`Wq*K$+ttiG7xi-JG+B#xU;FYqJL8eQGs94q!# zyxXB*VE&5=FoG41DxBCu(!LkYCbZPpG{n|we*04G*Wzt3op8JPd37#_u!+!ig=}wk z5-?0GnWOhywMh^%*TPlxy>#`|H)VgV<7=^zo4zJgdp6@i%;w5bV9I7e3Kxlr|BD{! 
zoZc<6iaodwVf3$4DMix&6*W5euRKl7X2VH-&6&DG0BGp(3RzqRq{MsRn%QHm(krF|$MKI5@RbUS;1j9k;o)vd)$L}9W)Ba^Ug{Y50v3b&;HPbAD z`PQra#CW+QiUjJ-kx)}FK|=wh+^lEUBQG437$p&nh!X9~?rkw`$>8q(co%u(tMYcK z`rrLSfU;gr@A1-6tH5yO3f6QF{qaB#51YfOEheB^sDCQH+66^u#M6HI6V%3tU@ih- zR4D4T^Se^FYxkz$+Hv8!=bFZ49~R4DeAb$v{OYIG6%Pr3gJZpO(&jERy`uW? zz`@AqTgs&tS%sxh?{XZ0e93OZaS?(J_QInQAasoE6O*s-n=#LfT zk}WDDt4Kvd75)TXB6;mZ3Y)y&<9R2oml9`U*DRM|mo1~ptZUN9Y{87I#a6Vng(Z&? zV@pS9PZdil9xZZw=j5COjLODqDM(LOxghJGdRV&Zd}*$d>tB2WvCu3TGxN6jpE1hR z0HH$)s>*%Fl6f`3!8Fx4pfRk&@lga)>igZY=!=zHm}B`Uq%zMUauXz8sRA^YY3$g z4>x3*WCktpN4FN%bhV!#*EhX-i=bOH09A*UWs5wW*4$D@Ud@OQnz8CL38L)xq&*&s zf#SGb7|}TL=@WS5r{@|_mKGs*f{=#-h~~E^bq-K1{P=&xx$N4cS|*Y|+8AmyG_1!G zWxXopUZI(YvNu#4t#sRNH_0yHG_z&;X(xM!elFhy#=z=q&G%FfdKF_^MYU8u0Wx7v3&ix-(2(Zq#>D6{(krCyJ`DojvSj+a4Lzwsx|uV6t`4N682vs<*>E zh7t^nNDoT%iKN9UOSA@h?Rtz--;0JECevnt+1h0(v+LM^{#kb7 zy$KLjDY6mVZ@VnOD{x62;T5>h7I8nbQ?t_X9$EXC@5RQbX`1GSH3o zMEhPJNfRPMB}@Wq<-9%sfVyXj8+ldBZTHa}cHNVqr7f^c3psll+k8{$6A|#ulQCNP z#72Z7X*s1eTA5NHx(Nh^WcDo1hS|a_mbfE9(+@R*7DsgPVRw4Qlk-v+oeJgg=Pc(d z`8g$I^P%g{qvu0GcAj(RobUUOX=l4u-HUr;WiuSLBMgIB9Zj7TfR>{$Y7e!knxYz5 zm;*xrBJgt`$|uLsAqU2QG<_e$>@UcXYve}>5Q)(0R5@x1i%CpwQu~?S!&XPRQGY^Fw3Do12tij%-nO8Kz}Sc; zUY4cu^*lNMM5iM&;6R5jwEo_Rdszvvqmx*HQK(MCfqS@hZcKV`#%BUkH*d^Iw-Dqw z%TgDQ&~2+SOg=O;vPOX#viBauVxh019LEM@16^D)M-*;?s$?`$PBU>EBqJe78*Lz> z#>l&^WITQEAsbG#SsJ4vdA%^1o3GnSO~Yl}PO9}cW%^(It#oI7M=5E_K(`^r6#ebs zwva?KPwK8$>mvNi=c(_c^m%s# zB|Mx&Gk4Lq`rCjG&Jo0Tb|^MUFq$+iGejPdVQ>DwKI7P7%!tbn1R1Va-*IWaa zw_jn)LyEtT&~z*BfSNFB+BZ@jR$$O6J@Dl%m_~UYAn{$WG=|X+%P2rrEcoU5)}Z|G zw=w%)f~}6tY3+cVp-Z+RG_#P`>;8oE||$D#7UXJ7#oF05P68^&G`2j zogJM5Ktsk}>7oQyGHVq{hlzLi$B|uu-}$L9ngD=>cX5NhvYWo(BLmPsgIu{OcT`!=@#+{MvCZ49k)J@9G$$>sDeXD_0OK;35%{X= zqvNdT={ER~w(y}@=1KGCVR6+J&ipneGX>WT;F2UUYVv2+!Mi%iJn0{$uk^1mu0uxp zT+{{=RbeBeBOUax;ss&K^se~GVpOF1*|Xug!&y+;XaV5NEk%yrsMFwyG9C-sIM>iDq5}aiSfvoIEmvF&?j|V4_j6axWV;270QXM^gq245qeQz zfSS4_y%8DEnruf8$mwd9xTWD79m`bRi&Y(Uz@kd{@dt)yggVE9D+4>dUL$Y;$GV)6 
zi&58(w(!zg8y63gqZo&vEb37i)f{W;g!wPWOqSi1j;BkXd8*-E{_B^huUM z=lf;8(QvOw?4RA)7b9)Q<|am#i0Zlr=|A zL)dg7=#VPRgMKz~qd#U42L$Bx0G{KdP2oT4qdlL7iZ+r3^qD^iKh?#Oz12K5vWZCf zAYS+6W-Dj1kSPi<$ESxz7mW`V^DLGv^pRu^HX24^qzYIeJxQo%l_5{l6}EoJL^B+X zc%-{ru2L)^C$&;I%I;MNOGJ?Kf6dG8MHR?x(T^oz(=X?eFvX^pMF^*J0xYxe`BUZe zf0A38DI{1!dI}MX?uV>r)q|OAdZZg>-F}I&-f!hil;^m*7)k3Q+_|jnHGVaIJp|$C zghG#BvtkA4M45U#ZLO9xTxc?xb(L1kZGQI z*g2$eMw*V!i^+O4S?zK2RDdOK?ZwHRzPgUeta7?qSq*+!f|_Lo0JXfee4?C3Ra})q zHCZ)=#3?VMyt|Y#-|QLoG7?YCN6FhWn^SDeF$9sP@aj4z@B&Z z!Eqi-o{X-Tu89s!&54dp-R{RYpt;iP1p07Pb-I#KSGTR*@bOxWNaJ(njH-^m;q7qI z-oF$4j%1{)Hkg7QUs?Myii4 zju`&)WFKnEe`bB(-rfJl|Ig4z=0O_Im<>zMa>8nTm+44?)vD#|*@ls>&8Cg-pqf=j zpId!XyQ}qihwXTs4e+kveyR;`c{(l}lwpKu*qADF?=B}Yw1^J|MPNp<22 z=)ti_-6KwIUAlT_n2(>IOe4nEw7`l@_A!N}g_BE?WC@VTi6{G4K#|j`O5LDhWR$Cr z5*+QzZQ*Jt-nr`ah0>#5g=x`XAb_54o{{=_0xR>i|Xg`SJ&_ zA*{Fe+31p!`5rH00ZJ66CG<&QPKtUjR>eLuYVZ_DheI`H7ImVd(x>^uQanGo%Q4s1 zO5ING_c9Q+Y?-~#I<*WZ6;;VvYc>lDg(whY>=Cpx< z)@Q_?2X`LiX!W1uPmc0~5&$UB<+*tHvROFvYalLh^z%xEg{)CW>q0fTS%`OK8{~5N zmk_R9Vh8P4J~bK%IV_--w){l0;xJ~49+Y}$g($+;D@OBTz^*yjWK28nWyoe8i|iT_ z_U5yzD3UWkyNe~5b~ft4+0iMi}#KTn(}`1pye09s;=)dXOq> z`{kHC^05o+jLDxs5ZQcOf$&Boy~ab<4kDG8c_@|~{oF+r&{nz%ce~skp!#r01S^zC zPmq1qVauGo%3>8r#-!G~ETnMF3lZwKd)(;`Dq8uFw{>XVGf|Ig!5S+plzVVYW+7OO z8Iaut;cX#qj>(8K`vBmVBb*v64ay!zN4Z7Fvc`ivWG#W08n83T6EE1w*f7N=n_+N2{v z99D?X;tfRl0;0^^vU&--fNeI^&jyH1hM4&)4?in#Y^Ve=n^3b6ZNTgEFtlaw@Mk$t zB)ig%?V__igtNj5+F@&iS>=(Q*l(kmkZm0s)3E48t|U5i!hHRQ+91_}k1V;KDwIc% z#G>R&Gefd$`f&zhQBcpdY6*;zh9xJcCWt?7vu5XT0l+-I8o8gpAnBgUeslYde374@ zft^Q7{;apadP;=6E{X72q9AVC&prZrM_zoe`Nkc^CsKe9na3L546H>Ud{^3wK11_t z^6dIN?T%;;WhL&TK>QxOb;Ag803jjK7}*R=qZ0_=L!uM(&gPCwn~qF!nC1^vB`0bP zRu7C!0`T4Xk-UyZPP25xYljoE&jmDmuky&$wTdeZ8Ox7YB-(MH9SEG9GwU)T%S|bv2`{<9eB`-EOvGIV;!RUetv+ z0XA2h2@A4u_Q)9aMQ2gMuk z6-bdizFwR`%gKWmupDyYJ&9nv7>)(70%G?pXCj1jQKwf#=xg`X8_i~z@C<51gpw&3YVN$spXF7Nmbl6b|k_{+RKHR5!{E|yj9-6w_(-AgWB;Cn*nc&E! 
zD98cvVlpd3v!?1enfTrPO%FbBUzJ5W*tMvFvyidVSUJc*X|YcnwIuSJY(XZk2>_ur zXDF6ElN(F67(AWtguU!P&0SE4saKKL4SU?m8rkovqK&v;@e?aUTc&EjIw`Q3m%kqK zygGOZ)`JOSC?98VW)8$vMK7{}ShEJ!?9VhZ<*pog%91D_)=%ywS~NYy#!6JHro0}V z6OKSTw56~M$@A1*Csp7SYp)X7698o{+66O69HTW<3vZ_jYG`ryF$SBht{K*tI>;}u z7+u?6Z3#|el=~o8A|HD2$ErFgs+j43AocJP#HTv^HHQJuTo1a}`7Mkh0-G%J9PdS! zQm^?BTgYli#XnLQ#_S^%BhVBI)xkBzpxcXk>PE7*;mvj3)_tG7iV(Sq9sq`+oQfe1 z)EFb3is3quE?$z9ChXxC)l6nI*;{Hd2^}m0DI7DVIz1CA%|hM*)Ox~5+foM7iZGASi()+RS?9HR54fpvOHCC={z$*+&rk%AZG6xwmm~kb|CIMo9g-u^UB~MKuWCVA3WnW+vvxwFCMXUtb5l_GW=v`N0;E` zWiYd^7>7X&Qv~!5c4phP^dn9P`+C_WSD^7P^}B+F`Gi-M;_&o==Xw<15GgEc#?OBMatGr9zW_z>-n81LWe36oA1WTZmDD!NZTV}t& zQek|We}@S-Y&gMnqMLWrW*Dl9m_XXbnMem@I(@6wa8p{BLB%+Z6owHpD+J?8)*Iuy zM8~>W6Cm@E@wYZ-xoQ8%qlnUI@Qsnyj)4DyToxqNm>vLALgk<8Iw_aE11RMI?usnp zh|S8T@jbcIVVWEciegaU{$M7wLlB$|Gh8@APTg6n8j^`XqqqTCGpN~t@ePI;`d{a2 z%n`R=2k2~53#8LiM3Af_7mw}qZftwd7i?0RzW}-EvImybq3B*#$2uE`t>?|}Lm1m= z1OOHxdC=GEd(CbmAmHw>N9>#SW7MUn5iM;J84dEStPmNfOIeebNKBK9n+6XRI2c(})6UdSTUnKTG3GR(WMXt; z>YBGyHWUmi53O*mtjo`33|~I%S^OaV3^ z=hk(;gNqy$R`Rmr=WTaqkqJr`I!dr9IH@X03B@2Pjv=!uW&1MG$wS4(9%>vGWrO_? 
z=NV(Nj%bzU!GOWXzh|@%g{|%7-I(RPKQ*<9($-AM=@uP_w3G>)q%>_gN+qv?aTYaT zCMswtwBjq;SWp5;O1Bi$aT|+^762Nc?PoIwGre!&H_ie7jA(cL-!=ED%^wyJl8f+R zE{I1w3nxVM<9`f0cd~!_$B-Zyau=H4FXaE)&G&{g*h(h->(u9b!~&vKK@>_iS#AN_ z&ZqB3_UCasc>;V)XedkckMuqgC&T>JA7NIj2(Q#JMdgsHo1XGxjA>WIH z(3_Z*OR1uak0eM)ifsXfH_|I2N2E)dN|EXm_w4$?$|^}uvceK8TgrW|?4g+b_x0bX zlP(vWPg#H$r0)-}_ft17|EIa>nbf-R6S@_sH?Jk(Yjc5zt_#SkawC- z+IJj*k(>e{)3B54l^!;yCuho(&p>Iz9+QU)bvcxG1m!72Ho6u2^hIQ%agAuXb)kS; zUZ4k_K_?QkASiY&baV~)5Ssrjy1_@Rp8PndI6lB23l!TEr~!dJ4zdpmY{g_=11&^i zwgZjs0NHo-zjXt8AQ|}JGZTVFPlFHb``>~CUw91oClOh2f!sLEe?fJbb`+>FgFz$cP!!01vA-jgK_?>f7A!Ur$j~szJ`w1C zrvI%j@P*>v61vh2SAVBbKPgj2d`8dKm86LPH4w4!KnpRLy+8_ynDt<=?IEIjAfunb zh9rlLlD3fp9a#+~K%yc2>)@F=K_ni^0E2MY-k|%c{*FQhJ`l`;aM;`aTZ#U+bhuC0 z0wJ*=(et3jlm=PQ*yEtaWCk{nCIbGBdW8uxLq;{c-=>8vk21;0waJ zVbX=G9JzrUMD(dY3lVb)*pL^{0yLTs*owh?1sUA~8vPB4O#~f14LW4(e>)wgZqSL$ z>;^I<3%WlIx-aj4O9*`7HURjbFn@6we4sP?P5bpr)b?MPJZW=c-04qgM5FW|^>Pz1 zpMmZ>0ap;Qy+QUFfiHYO4|HZi@Mx+srHtYX3q1G2yAe+vVA zp#r`z8hoT|O^%#)$>D#>no;_Oj~2hF0e~-fK#w2Hf>7AGP|-D@ z5-V{N%NI0iQ;2mD7u&1+PtMKJpMkbUJtpg`H7ozN5svu_jCl(T`x`D>2$ts~DspUu zdUa#HB7P3`b!V$f)YsQnw3O8O`CmzgGo4AJ2+KkX0^`Y_pVRT%NZ%jEw0`_HdJ)@z z{pJMERxmUPzzjX4JRP~;u84Dyh^JJ39!5Etq+Qr~DBew0p`?9>?RNKOM;+t}COhK4j@FtL7l4L6k#|5 zf)xi{Shw6VUX&!awydrkbRjNIy+Dj_RMCrEPkv~Q6KRG3)uSatQw%Gvf*uz7@(HAk zFo`$q4S0XPHpfU3xsBsMSRq=Wt%Xz1csAaKhZD#PV1uBw3lYH{y652V4l67O7j;LNi(*F4r@DPwC%<&rcHzQg>o~AhYC&wS_(n8N`HXH?0QAV^s7sq1xeaVGtb_!_ zh}~CTTw)yMOBbG)+HlYEr)%J-m0J}5C92Lu;KHjOs>c&6%S$fr=i2sJoXTEr(T|lM zID&*9Y7$%_kl`s#ij#mz)sY$h`VR4Rw+ma9!ss0te)wzrOO@AiJcs#Sdw>bv)B zAIjL~!**9pq}6^am3Dul*cjG~vJMH*dsAhvf+f8jPj1w47J3BR5)u=ebZ{M~Xw=Co zB}$%svWShuRgv*m29rkM&k3ON*B$W@lQJNu5iR@s_cz8IBD}(Ew9LkY0qy}Vl$8CA zW{Ab7oa{C$>hfpv-uyu_hAGM z&f%di8El9YIB}K83|J*a;~<%2t$=}ofq;8n9C9kNIs?f>vVzqqaV0N#Qbr+^s*DWe z1cH-0TrJkH=&y>TG}!P$GFnI#ESw1A=qLqMj=2sD{h26IK2lUS2%mD;yX83O{WkAr zJB*_O&yw2<#{kLlgMhVg8TkqT>09P|CJDtwPh>~rI%GWpgtag`CI1j6u`|pc@FnoX 
z7Ta^xQJQ2@-~n}jbiyPl69jFAX(O(prwHSCz7Nh`4$)M~q(|wujfvWPBQL~!oDo}n z37)CT;7rei8OLO)IA<FS4i*3MBZ3jgr;)R6`A(>$1zZ?o|-&_al+FSvzU5U>sW=HLXZN-@bDV{ANsf%U} z)DkaK#chp~NB}dsvA^wig6a`wC|LxPW*%OIEu~6FSkZ=ZMlG)hbw^HlzQP`2A-aXO zAgqjwFQv0=;!U@D!lMQ_a5jaV+;Y7q(rjQH^q(TzMIXb?;ckG`%0N`bUi`lJy^&id zWNiPs<&^ZVCx-D#)u8J0>yy}9`0Zo;rG$4KUL>J-c{r-E1qJ+RTRY+Oh|i} zo6EFJikPe=YMfTs@tFu^HR=t_d|gFWqzBr-^#XS`Lis01!8I?y=o2PasJ`fYhC-uX zNQRmP(J2uyQwtr^LUoEx_R;KLIE)sJIC%IM0o##y3mQ772+~fMc`&} za@$(;=DG|M3=>SNb>8&NZcbka7Vs4GwnM*%y?rkL@DCv4LE}r_;vR^uuyp)q(KbgQ z$reCdA;v)fA!9{uD>aG=xQC|$J0aKGoJb=ksR;0$jS(AY%|-%MaHQxi!I2>qrI$b2 z*CJ;oGn|Cafgr6#rU6ik&Hm5OoX`4DpQ*br7FX+3aq)D4`DEmupXeC=w2i?!G6+@C zKHfK?eg@Gyc>R!78uI60KC+HJ3R!`6@EvhaNN&FY^A}fYvb#wZc7%46%l!Px%pf)r zT>U3x@gX7!NUR7DSFA~qWK~sgs9257gtKz|)F{Cvq}Y_C$tAB@s!qZwU^9cnB`=eiyB^R8C3sa_bO1t3Mwb^k&9x@~>?-8}n! z$)*<8AWwbXRkRU4CK1;rK8zzk@QXlwW8V>Q(pt>9Eb(y`pdN=Xc7iAEsuQw${NY$w zhrr$p-mTQ!=S|N-Qyg|Jf0{&Neoei7D-Xe7{`CB}QA8O|be`vrPq5r-XJJ-z9cpNb z(uX*!Y6^Vdsy1*Y;S+Of;#@@Q?`Fck)ENU%DTc_q{f3J)Z}=c8{)T5CU8WV4@=F0= zvgO+``P)rU?e4J5w1@lFHNx^~NdT$KX-Dh6{LcO9xt!~!D`EOo@2TpOUX*R3NGCF3 zA*b~cxMTZ!dn1wvae=f63=CJWnjVfvR0;$_QPE3Y@+*U(Oe5g8Zs z&pT-t6lbTG*N?0$Pv<*lST#{sL>1>?+Us}e3$(Y@hKKvEhiC=KfbUg3-h@CmjCtAl zg?wH8?Vq|`UbpL90`(up(|eqf#Z>?JtDlfK+`nav`&T-a+WNnQRrKB##2btzF{a-TpExj5 zN(s%neOr#sFOQ}X>E{;oUSH3*RXMiUzH7^#bMRp2YAkgL%e&fmCq{Sp>|wcjjHRfd6%SG3Um+M9bGp9m;q+H=Zfx)MG5+TnGyOK&Y-y=UgGweRWinPy4|fSwgn$3WfVV;wiVKQDVk-QFH`0zeMvd+)^E- zAq3M9{1j*>45MDz8|$?iE_Sq>?wK_blIZR4hxIbSdr%(R-gM7>n6$D}!16HX{pWXL z(iN+`Ll>kL`YIKnm1XJ;TLF{6r{o&A;#>Faly2f@bmB6NsusAunJd=X8qDS?7$%MQ zL^SL#2TgVXX)xf5d2hVq%>=XU=Ki?P{=Px8pHuL3{t9umm8WD4Hd-kg()~ZLjy7H> z+WA~2`CYYfj*q)$YR#awZFTWlLxPxj+_um(LvfKKclD`-H;E0K4tPF8AlF>(xqpgc z&TF=nw8vXDobzjPqhDW4=x0BkY7~z%E6)|P>uJYfB^B1=8VSWCGlmTrvpQ7|=if=g zb#;cfJT^Wi^10MJ-l@o4*!sj1_u58Z@x?;2zKg=zX8YJ>7Kt69y-b4-x4(rKdkcSm z*t?oLFSgfpE!^xn!vAZ;8Pbufd=}!Kdq7`=;4FE}1d)gtg1IS~tkM2WJV^ zmE{X3UyHbTXIUq9qJhE=^4t6s>sM-8XG<|{+U7MKRvT+%%e=L0i<@qQYWdf`K9AGF 
zpY6~1Y=+B>?_V#3DclH;t>36ict%*OuW7J%+OPUd=+G%Hs9#yvN|yuF`TRZh`Wc2( zR_7`!MmL!EYNxjoxLEmyihR@tO6Fc`ur7Qnod??rE4p{w%+0g3%GfC`-r3B{RAZm> zU3#1Hg@OALH}GI#Erx2Fy4dH`jir+(FHD<_xjSv0?i--An~A#tIQ389>lm!B&2L+M z*ga>nK$m=v{$$w)CB)nk@a+PZahiPdjkw1*-S%J1S5}();umChq_t?sfj1%{k_B$NH@~liP+*J8I0v>Q{~43zG7UtPQbpN!=~F z!Rw7)Pvf%FHZk8CO&Sz3Y+Oln+vW1SGWy~{|2zLK$le#n#8z4YuQ%|SM^Qb8rF$J| z43c=P@@_1|=lV^wiiBXVP~OwiJ99d%N%4k{?!_}bwx#4dKDe!y6#2v|>&pNt@XUk|W^gPvcsPdXCA z+cjnW625^Qddm^uS_k)v-qh1?%l1_N2nAPxt(*Ld+Mak_n?YTO@CBm`igSnW}CPE{e~_rPsDe@P4G8^ zsixXei=o_TdQ{T|hGzi(qcPH;;c|6{d)ErQ$Xi8^+iP2>^!6xBw)d4mr(;(5N=dY< z)+(-v7I5YG-+Az8n!8Ie_x=iH^p;F=*$fNVD2!y1hwf@->a)oBK=}#_?BmpK^K}kZ ziW!c0CZNc_L_Zwce=0Yhcw@QtYRGiCIIi1%2qSIhnPW$xW#a-|I5`x*fF$ozlH>l| z?zU)99*Axa6@FTxT2A~vbD){zfTpgDv%$rUi+9AQuZzsrp3BF%SdYiVR@9U7-gGf= zP9H|)eAz)_1#{-ad|iScmAel{s-PVk4AIM9INeHLK8qwtakNllO57NQMpcYedK!yh zdR3mQax0|8ao(i_zG+VGjMgc7x;LhG^OxkL`^ay8%S*l+Y3SRm zws=L07e0lDv0RE)Q#t+0^^O>3+cd@H9boBZw-lHH@kxCLv`3@(M_J2@)f!C=v`B^K zhUtrCw2ByUGQp#+>o+Ey7Yb_wlJQMaUhcegD;;AFVIj;`bU9>DULJoLKm#ihB>TclKZLHWEFDy*0 zBE%UbkEtZ~pyp(|Nlo&4odh12VvCcda?Ilt12p7%U)=mS4W=(jTc*Y+DSI)N zK>jfDb z^B<%vl}qDh!b7r%|C{FYt!YZu{T+pUQOrHt>GIrANWU74_F_}AOI0anK~nw7=cAOL z{NHDZWX}U#>v@xy`}f$ls{O1?#JdxQtLDI!9;&VlkM7_!`~{ScgT7&QGnhJ)Os$l|N7BgGB7W>AC7!(J{K=(Q6yxVyB*Ja3{nkVvn#s1;3zS z+(RA%Hus;tYoT(KwwR4*%sO`*keAFNh^Tu=zmPS}+wZJesk0;)El&f9U z;a=gK|27|TTEygx5P9%h&xw|3jn=6oYfF!BAxG=O*n_6yFI-Hj)|Y(FaBaE!voYkm zg0l)6aNhc(`lD+S16j@`#-ntXJsX(#0(M;`Dss@zW1q5LOI=cqKbhu-^qlK=4>NhM zsuwzZnceW2X1|d#;i!B7G*lrDTQ$T1`_~tX4?o>+4xc$;mQ3K%eoPTS} zc*7g6-NY9SU*`D85aUY0uG&N1-tPilbfV8nlj>@+Z$w{gDGm~z=1*^?2JWSMDNIfet>pq2$Kremb zqZ=>&GLjKI9wm1?ge-(R*Z@b@c${R-5+7^Bgf-QE)$$2k6ovRi?%Gw%CuAM(89pEF zr0zkd&259*rJ`-LtEu}2nZGr8ZI*XP&=*jup~yQ@{rZ!F2XkNVRhlmb)3LXQJb=RKrSV^BHeq2 zQKO^fA=?9-O3Rh4kU9J&v+H$V#Fr(z9tle?)BLST@;Rle9Xm^uVQ#U zzDW~5u42dUzFPX^mRjN?alBnf+IhQMm!pl1to2&rBE+f?Ktc z=8aZAJm=O~teJg43mlh*98;7Kj9@eY{3L7y#0W)j^9C;+jIOr%Y^Fra>We9!?-#;v 
zr?fD;;N*_X#mPVM2%nXb5DB@sq`)kJ^x5}RG8XQBTPI#vxJ7u{cmJx*4F96{+-Aok zaS7Hd6vZz5UZU#B@a+4E85U}=q?t?$mM;rcGt%K9*wr^1<)2KsR%>sP#h(wU07z7y zE>lvQYi*CUM7mpuIBaly{kHzR>3%I(JB{_5s7~=Eci=__!A7>5*H2qV|_vSHyC~$(`c?a0J!^5__Hb}UzfHMJj%sw{pCywz>*@y z_hP4wHanNYhX+!p)@6EoXNjKm_Jswn&N9^)a!mGI=BvpVUCIbBo7h>I0FM4%pz!=Y zi$&%6lsk+(fnJW+BfjJln!I`iYag|8nRQ+q?fKBSfaCG-f5`9E+F9E%7n3!6IX~!N2e2$$H0tS@Q0c!K`tMvyY92+ zzi`E$I;Kk3c6C>5rrata8O@eiX%h;WMZh>fZ1==?&)0&khTXt`SPrQ55 z0D4f_Iqk)hgHq1wA)&OX?Xal(SWw+S!~3F!-n?E1=n?h)TBd9)8NWVT{dg!=w9xn} zwXA_BK-$F2Z<8R(MhZuwu#1;LNJ<%`2|pBxtQ0l?Ig@};U~KXg;?E%wEm(R(&{Gf! zQdwmPLXpzRNJ0>oA>h+cHsvJ=HClf{a z|BATApQx+D1;|1m2t`FYYB%?rHWoMNsJ$~_A8nGHZH@=x-S}Rtyd5Dv-VvXf3Bm4y z>Y}1NB4U;x^}M6h9A$;>caYOHiqq|;Gb4`cX$p?*b=mih!yG zoSu-+h$~$hI(A-ib)QFyV`%q2J7s=+jCrFFM+{i1t&zaNz-$T9jE(^Fh%`S=1tnUE zX+{9{U|1X4xHIB7Si!a_I+a}33f>us0?V;A4yV$rSn_tpZIp2$yyNGUn271rCkaAhqhZr>|AJnrw}?E6})YIkT%_t z#woV2M7W~y!yx@9OX=A=JSDQ8O+|y`HC6y(HbTKwbiHwOkYRh`$ZXtgSAP1&MkGo2 zB$zuw&=nfnghNuaPaRU|>2fD%Gh$0!MWotLNUmwTscGN}=7eH+&!m89 zxCVwws@3^6G(9BD!Ut^GdVJXb|$-fW0D3CzD7 zsiN7K;Pqo?7!UB{VVFoKP}Ik`;7|BbDm8B`DeN3!m<;zOQ)-J_)ZZa%XtDBUQEH#` z1~t6Y_h#xDx33K`pEM-UWwow&(>FA`ED4ysOec^A#3|w9yvd~}VYQKn#nYp0=8JYa zbU>@pqSp@1ev1hKu@~jFL_gXN_!1cT#;yE9bQbxr=~rN#3Cl{}o5!5s3JT)}j_z6=&tdEz@x6 zaSGzAmW~EQ^Wk#|Q~?p~i=qLs?Mq||u2Lt1hW%G>`wXtUkE(<8%2%atXzssj`&nF;+|{t? 
zYbb;4YH%GC1{!qw&8yO(T89uovtO_IJs74**Vn)TCt~!=f7JAU829xC%OJ$I!`eYn zV!`*1cA~{yLhFXNUjKoILsa`aViYYcuAhCFOMVi8!ftR!I|+JU)#+yXds+bExDreM zJ?766L=7|7|21Zm#!p56oWbsdzh#csFs3a1`0AScdTpFs9~l14$RU+8CoIq6pa4!e zZDHX#R)@78#=R#cbPV65>0OPE6B&mm0wzQtea?$`o}~QB3lvq3SsVrD;j!maxhE}5 zH3OF*$3$9$QaKVW^g8dHf{^F5T~1}|L!XXrq)p{rLECnQT!qlswmYRVyP=&sLt7zy zQ>~y>J{pTN62YN34z^ZxBPI>By+o)Djy9$JrqMBR^tFQEWE|&}R!$=xM++^W;C49< z^`ZUNAR`yr$|2wSWf>)2_zbt%zr^-i2FBDt*Q`+ z7Ryh}fdqdjxdp0w(D6W~e<`_76@ra$0!afcL|xGx^dPQ4=CBaygLVAkAnCY4s?HkHwj;ztecxBdzZN=X%o z{MmR=1Pc2_SX!ouvWpOhdw@k#nLkWj6poCre|X>kJK_Z515z`D{nQ+ru%aSF^g&^F z0A3U>{0V-2^=FtSY}o5tRR2R_#6g5J526FP@Ggw?*5F+(O=rQz z4AL|C6p5eetj^Qz7vD6EzTKO8WYS9h;Ck(Z2td4GBh$B}){G@yujMqkbKJIv2-?0} zJ-A*I>5Lr$i%+!(50b7}N_F0@#xYK{`x!CP5R=uRWMqCyh84%1(n@W_n{Q!L5MqF% z{?K+P8K*!G96eDn9DC0(f=+j|N+1<2wu3AtXg8S9{-uN@Wzz*;Ck36MkE2N{gakfq*7iGjc zU_jI$6cp=;0o?{<_`wGcerG!BA1}Z#ung=s8w82`%?5=)f49LAzu6E_;$6wVVnB!? ze)*Gb;L4Y1pHUhGGNs2IN}`>3i!pL3`7?4>vR!qzfGYG1P1;s2n2>e z{pJrT^DAi?BAfq^{%iVWV6fk&i})_VD_Pw^yMF8t;^6t=FyfsnzwLik3ekq`od+?q z%EJHD)VOfag#1&}iGwC6`1d9#Ht4`1ROU}jm><3VKm&S61NsLVQ0xyH&_B?CVt>$p z{ecGTkOu4zG+@{tG+=+A0Xw7t`vVQwAr0cH|7{Lohcw`SpaDOm0sjLH_#qAWA85c2 zX%H9sZ?WKqG~j=r0Y9Wcl-6&(5Qj7nf1rUlq=EPY4a6Z0#2;uN4rw6%Km&0|1NjFU z$R9L7NEs|>2_ys{QNO7Sih|1gA^->&3`731#}RF)U$y}-1Pp=7{IUxWZOC7D1_%ra z{jvRM{$tZ58d2b1R0e^;AgEuq5C{wjMf`?C!XVhCB@pkymV9t1WBgx_T>h6snZvUf zi<|D=H*JWDg}{j$8WGT57UJz delta 90989 zcmY(pV{~QD(>5I2w#^gUwv&l%o0Ai3V%xT@6MJH3VrPO0Cw%7ryFa|^{j{%Zb#;y5<4}z0DDF7dWlOriZ4V*XWTpyl|jhBrFz|P5&q6Cir|FITl@ZaFz zd?|*IWWbxXNehDM_&iQ!1AV8INe}*Z3Pw9ezNX{W^Mng$CY-?}UX}zh9hp^PWO@ui zmI7Gkp9hw%v*#%%ODZa)1>b!KY(?H`yS%?@{~o`{whz6H=@orHRAel5YQ2=K$??t@ zmjV7Q?cx*W|Nb(=t#gx*Fw|y}bG!Ze{(jAnJr9giBTJ2=84{TvqCspB<`aHhx?4ye zSdOV5B|X#2D!>&f6os!LNX)x!PudIPsWrwcBkRK%OT&sF%7#t&nZ5LLcgJYJ&((En z+rQTn#;Fjy|Ll>s<|Ri-&7)DzBl9lrH;~^VxoMPjR{(YN8JZKPfe+{z0Hs9uS-3#L z$^)E2IM2D~0XKKYR_Ios-VqcOel_)avIsn|`kBLeIzQ`l;`emn=45?yV&#SZ%VY?; 
z--cvH#Uj8Tn`Dj0XW?!4=htTLKaCH20YaF3=$L1sHgc5~FuSvsxS;;opf3&)wU!f8 zW#12T%zQ_miCfkE{o%t)1P?BBkFqi_Zy@a%jYK5UM=yk-iWZQca?`L#1Z4N;b=0wz zid3B1mqP?RlS{ZaHA_^V+=Pb0qzxH4Aa<~LXxGdzka%xP_kPC!M4)SDf(yTJJ`AdW2O0Z4N^_;Z$AfwnGHkM`$%8YvITH=Qx5U4-j3` zMLJWhmXUfsx9>a8b4vk-_DHmkbHiJ+jq)9Rdiab|_iRF96!Kwd&Q6W$J*Jw*VsUDK z;&OL%r7dYaCn+`;*(XxRUD&5j>$2G)8~*mjv%8J83R>}1)30ILtH=V_&4Jl81tnw- zipK!ikB>J3{Nmg0dk7qZUR5J!3E)_`gAJD-ZjEAd^;!^GuoVP&i6jq#RYrb$`DPJA z-YC28ulFKQ0u)rvHzQQij~?laDE^ds1i`%Qd={3?B%^{!Lzw8QQ&`JbzKd z4M&x1A5_IfZV$lXwIso`%LuF8D@*Bx6lb4Ie%18vDoj+$r=!4AM@rp}JeJ_IsW&EYNHGs$^;+bXn&Wo=olTKNiTD{6Z;O+s6>`a1e;I7RO{@Y9*grPcj381fWQB0Vqml${MMqTyla!C2^sLGgG0 z`qaQOjH{xn;OnIh6Ry3Ds^!GjuLsWzym+D(}2C~qEF9P}s7M0j>a(e(l zal*fI2+SDF1xNOT=C0#K13xh$;TM+1a0?0>@M{!2;{I4S`4z@5 z1y;8qXVm5H!*hjNRnt?Kw}@k*#{d;eKs31IDKn22JbMlk@8y+&@&=#m;qnK8OLY^| zp8VEXs1q2A!XCrV&f8!ODNAskx|sY67QX8n2m_6eBQ4un;L@MO!74rRA8lz(>5CC# zaME-k0`mU3r+>Na%)^X^HzrijvV!Vkl9oN1({*zk{V?^otu+Sh2TUJfDs(qt`C4 z@fJ9iKLN<-8`B+ha=yA-c!W2!x=Or$ZWk`eO~VljqLbZmx5tmN4R4Hh1h3aR>}%(@cqh}n9tC}xS9lq@Ls%IWcurlJX`a-KnYAw&Ez9^IAmmdu$UAzPOgx*)O zo{p|2b`;$J!~Km}UVj5{KVE&iM&EH*yZU{#0z5n4h+ykq4l-_E<}W84mE@STFlcUC z7yHv+OhDR~1XK)+g0jA4eJFBT_ONMqB&EHJMv)c{j$dqPg#RP`QlOPw6`3+1Y=~o9 z&qJ#?v}ABdQAtfFMiK)Lcl4WUg@I$(e&y9|;Ui`2<2so?XyG|bYqv%Fj#!JBWFj!E z2{%#G!-otx2+&BIsOj!Q=2#gscLPtAM0AcUJ%mNpksBLrs<`Og>1<|EaBozcL8OMt z0=+$evE7A^qVc=nw|AnpKRX3b%)kf_a>48Er0jJ1J7}QBl~Ql4-i!X#HjI$+b<#ms zFyDqWBV;sDs)rIf=akAYKQur+RKY_n!fBKp2VLp4S-QhY#8fFCjvAA3jLUS+39RP} zfT4nwHVKo{#BJ{2b0=ItiTr+hP~j%%;`LpBi|Z^JhQrp@dVx4EW|amgjX;T$uPyO2 z%Edh=W@zHrHtS;((MqIuGa6sZA5y$3@53{47YWklBHjUa;i$^$KIkZ_937B*mJrE{ zqwFe72dU6jzQAI4nTV>=mOHM~My9t;xob3vD{nN?HS$=5;xDqTjxQUU6;0q?{UCU< z>2daXJ(RmYl8HPT$7l|GEWWNZ9;9^8V&oTsHoaiXtxfp36+-MunTnm?)Q)MMQ3{ir zz3qIfF0%tqaw6E*iu2%f2S)q|@00lFVG%2_GM__%TZvT(JQ{%wRXyJ*8v;X1l|I~p zc{Uu;(&e1V&^U`GsH%iwdU$i~&C|qtMz}guQ1*LY0T>209Z4y$|JN=tMysYoW*d{v zJ3U|_f53ByZS}znE(I#u9Ro8QAg1uRs93re%axeYE zuuD~I-*r+=k+5^3t}FBXuHK-M=&th!P>#;R?!TY)V^+hS(%?10`&irqiJzLvoFgFC 
zJY;U4p7HJ9GaWdAkLiYDK>+H_D8#afzGVZQ%9am@&|*wN6-Z>EAmo$C4MRCQ{&-`S z87we7_^jG;KJ~o@tlVq`ioK7DFJ^CEa~7V?1>J{mG}SH!Y3x^zk%(sokthyJW`|}< z7haRIL!XgzD4y}f+G2WOj9J{kJ_`k`3xThwF1TLN_)BQUL!0yGt$Yf^~}Qj|)C<%1O=24fcziO`QWe^??%(>O=!Z|~?)=^s0z z1q3CF_yQj==UXf@dC7UH?Xb?qv7sOyUI=LHS7>N`gf3-YOqsGcRb$|dt(PeRZ$F4E zrt&)r7ZM81zDU<#Uu_O23}!u>e1To!d$>3DGgTJ%nf5t?XL6Z`fQ{Jp-qT)~euK*i@%yAk|c( zJpRdWW&P(s{O=;^EW3f2x>VdNC|hjbMHAXW>rVjN6t`0o9{6NJFt}eTDrOP8>(&UI zIG-`DRO(buL?Qz}Y7wY$Pc6(BDS#b-Mr_lbbR-?xEn~WT^fJKCL zE|4>%NEu`fs!W@d$O|GV9uCVg%aeA|-hvWBjfAI!6=-r*bs5d%Esj=Rvp8CI8t>Uniy&X-vE+`4rx!SxHk41fvgL`16FVqS7%TFQ1u z`n^iwg%5JOsU)QB?{nA1I_|4>o>#F80=IcPdnD@CiFuNcL^^*Du7W?%%TM|5Z*pbd zElA0M3l&Ew2-cl8SPFl`e$sl*ClMbW2}>%k+kntGE2$}r5@Zv@uac~xfsksAfuHiD zZwFD;G^vZK{Aj9Odex{+)%X7kteqi9YWJ?-yGH7)juP18^9fwH&l0qw z>k8kX{LLMUo;+7WLPEY}X(o$#w}+2A!z)BKy9!Gl3&vf+>5Yy}NIQ=kdLk^>JXbu_&r7|$`Y(s5WYz2fvq3jLBNF`MOG zJTf&X7XS#it%0Ivz_kly`ac0k0ZGf~DovB-O8}gST98^gmJHdDx|0gs3@~wJ9An5)yUQx`Pcp_J*o59kMCU zsW!Vlh6J9v-iewr9^Q~pf1BVC9Jj`QteMzKA6hLDO7}m8fTPKkkSg^V{y&SQNWa2c z%-}ktGqBldk&q_Yp{!@nu1|<8y#B>uHw_k+Bshd@rE2B%&j?UV$LWkjEG);|VT1Hv z@{8u<^XkcmJl~o{rr;qZ&OP=JC=7ZTfB#!+4!vjw)|G=g5=4%F%Om4Omx3)}GQYw4 zUmSF6gyZa$(C@8)R1BDiO}7sVs3g*E#K0B(N^O#!1D0_PJyE2wK!D2ymCv5CT`dVLWJP=^EJ`SQ6)K#vP>l9a_D+Yk-TeF++}77b z;b}%e(ez&lF3cqmOgZ6t$RDe9pFdXL4(fTF4xh&Uok=ToheA65mT+9)bb|4emiKdA z8kb5Qw#LmtZ8?Ex9b&p@&X5YMtA9P_c6w(GmuWRku}^+`nQKboNw zQ->W1C6cC3fRM{WMqRYl2n(Dn5ycoUzDL{1+yPcN10ZDI!2A!bxhX~D_f*JOLaM-L zVC6+BhV0m3aF~rs#OpvFWGErgRt{(^Gbh*Mpf0Zt03~?Put&OaEe*_o(>{jG(1<6y z&i4*vC%s`Mfj#L1m&{IKsP!|p&>x75Yd@7a-0k&&RQ)pgjHR19&}OyqbKcN;`_Gs) zn80Wbv*v0nw&!xwf`JIr?KJgahPV5MqLX)_*4rpTvx6+bkwjY*=hJUVrwt9FMIa-c zl)IOkRWav1q)$`&QM1J+L2)ocr6xh z|A){~t0c3oAfq3ECYP1giZ$jwY4WzkOHCEG)sp3inYE+Ww5IyX&3b5gcnr*y@XOf`4RvI> z1WK>%aoxW#EqU3}x1cMlNSi@c%-`WHwpBfupM}IVx-DBeFm(c-x+?OpDZ`{Dv{^%E z!kl$MJJO zK47?c%6eQV`JPdqT_m0@2eT{np!V=n@jvF!3|c`M?P2F$;GfG8yutUXKV;(3Flj3O zKDBwq4=XuyWmi1G$n!+Ax}fl#p%%PEn|bgds3$iMSQSgG0?I7Iw3GjVXic3ff%EGJ 
zg+jJs34tOes{%&MBP+z-j*kK}sGVj8ZJ2e&(kZlhOd6?)Wbjh+C($@Ubt*Armt@9U zTTAOxDyWHEn3i(WYp_pnJE7oUF;V!-rLfzeqpb{8getje?YsBW$*b$y{^~D;dS&`; zqM5W@ebweK1L-;JGw#G4HShmI+xUFUq@oo!){{1^B?UGsMr=y%=m*a!7U0R5)hksFD%Pxcl`UF`zA0h3z$?T#{b!yxu z70dj+C7b>=F&5e0KXIUm7++@{Z3@JQ=1(qXb} z>)`q^`KrT5jOh9tL2szTY7bEaG<;Bh=!eO6QBpd=XlRI<>Xyw-7Z0s7GpUI#UA#np z7Tl@R0mtN?@#2r&Yb2V!o{O8Xgr*{@xV!Ug+w{>9WAcrML+NUcHLyywOjc+pJ|kr` zai()5yOksb(Gx0}(rzdd&dzO>uY?abWGpwRFn(V*g*0OcsYF0)n>CkDlHin-oK3Pb zjj&(%5J~z$5x_RZ@{vF%&Z1eV_NSk+3m8oV`X@QV-cB!A&m7hMM~ zS=Nep%P@d0k~rilS==9;$xjrkuRrbi(6+*HQnnOR{M5zOl4)T{bg~k6 zz)-pjb&BHNR&!{DAz7@_I!4(lac)?{9}Mu6YKi6VnNyf*--B(Vq~&WMT3yccC*7F3 zJ|jWg*tsYqf$WZ2E^V>>>)PVnshjf3a+OD^>@~+TI2-?mA@rvsVfsEd*{t-=CXkTYO&;Cf%q}UK$Gw}<7ieR{Y$yY^ z>7)++Xi}G{;K@z?HGij}e+k9uPFBaTqfeybu?vfQUlYImkd>Y@)rlU&FwQs-G|-Da z$CP$6_<$q|rEfr-LawT6DW{5s+IaPbKY=<2`(7xoS^&UE)xTJAy>YJw0pW8>9Ii2? zLo~m;JjRj$J&%-b4(pmn5+s!9I~n}BN&;v&};jbqSVMWnpP;?ZE3at1l^Jys$tZq+KB&U zt8kkpBAs}@|79~?en>V^(L^r?8uVwNhNFjSImCovszUm1SXfS%hl1Laiq7n-o*b^- zbtO*rpL2guv{eYJpj*j+Od4-?#C8Z{?6keB}oJUYUB zFyvO43?tGYzcUhO;7A8aA| zFtfVOKZ;m?SyhN!DUsyYcS!!8z@UaU^4dyu0JUT-E-h^cDi%Pr@{s}6L1o@y3IdaP zM8lpKLR;p3Mj#8Xut{-E1%U|q?(;j1PT#0=OUX*ciL3a;E@f8^KimbCag|dJ5uldK z$=>n)V|B>v&RDq+RXJ0WZwopvVFOAsVxRC(J=oj@?k!O{umC~#r4;WnL=bPt$C94C{OP;VBtn})s#HIs)<8E-sy;^#Mk zmK?Dbx(c!>h|(Y|7Wk1332BaF?%mcKv#%bkXd~AXf9V3bL0RS$)EUYskXb?%oKsZ^6V@nC zi3Xn`#s+{mNx%8z$?2iz1-vnt33`a*4Y;`2k-+bhGM)IaaK?JvHp!P<4gE@hyvq|& zghV1b4y6n683SJ|^=2U!=RfnA_fvBPHyxD5#XC&|_dM;oTsk;>9Wn zp;MF6%PIl&oZ(nwce?=gNnfEd^WU<;(;BY9*5vbX{tX^U4LOd^>`CtPu!Vgo@w0wfplxumO%U5$QdscaHRm1 zo0$U~BOIpF#%8r!rzdVVSMO;>rgs4ugQM-V8BC&lr$ zxqefP-}CrhK`MbsXp@p-EjU1@o#ju;`qK_L0nfor(IIZWDS%i%5fFG;`l5rJHUXx^|5qbJ&I^u*}Wf!yKIYrL+aaI8ZbhFNP z9`7t2fO?xgxWalPc5O)3LM2IAlz{q#IWS@4ql^S{Q;r4XI&;6bdgXG;!GIu zAiIRiDSaE-V#-(fwvaGgxGVU5C=Wr8KjCyOBG#JjunnXML151lanE~wNknoOQ0AtR zi4e2K6_k3h31)}i*gC% zm?=~F)hhjY$8jKZHX#70p;=!gRsI!NqCXRn^Q&nE1pda(xW(%1^7~0wz5-B`6G33$ 
z4v9OgMa&G6YcCkVc>W*^z&QPmOhLWC9fd$K)5#Xw?QPi6hDx8+4!*o9!(s?TMJoZ4 z@`OD_;v0a==(Zr|h9^lU1S9q>4LRPdAn+^DH?u(qjLPIaBjbzzRh1y#RuzuSB;RzE zsyWRhPb&R+%SY_h_ij(qW6Q-uhu9vh0k{umb6L@st*S?ccG`aR!b^?WH2HSyGEj{k zQ9`tY2l(Y`n{7IuW$4A*=gnYP#mGGIZ7G>c1v`fvI!x z!x?gkU!2>0_gNNPS|DX?1)ZH&qr((2o|89p7}K3Yts$^nH`*G#l}aO+A$^qa)Zi^+ z+k{mU6ww#Cs@Oy2%n$>20tbGyX=QDq>)An7)$MD4DUrcLbA`03yD@&zB!)Q=i1EXX z&fTC;{q;m$y+V2T5jE$M)=et#;)q&$MEdbwx{itz^bx6KF&Mof`{QjWh^cS9;Engm z)cFsExm42yP=xAsD|OZ2JckrqsT96)NH$ajrnsDnPOE65OFa&VmWZsk|L;zV2AkrE zHM_ZRIgFz4I+#X^6MwrhF}$3Kj;n}{Q6<{}F4)G6H!E$`QmhJIs-vzXlOGO7%Wf(} z9W$ns(9CUFJ1|^1qO)vGa*&liV2GvB$~uPnnt8S1#wM|6_adID4Uv47?+8|1EFATEmkEP{%0}^ zU9z)=T!GXQB&vox?}#m>T=)@QOod0+46e`^W82B9^-3p?2oqBX$ zp`-iOMABzH!v}0Y_rHVC=9jh0vHEEFd-@f>uzt=FgC}XL-iDQ#KA}QSM~<=k0NKD@ zw?7P3Aq?+clqUBvyIgG0$x<9hP)JB<_TtF%@oZs|XF9_=jaR5HKbN^?fRvP8EmBoZ z-Psx?qy52Ma3t_0C(1(?7 zp5UijkDs@zufEVw?5{ovn`7y=T}Dqw!_%CC+~Gby z8b%$SdVZjH02L$nmxUz+=z(B&P6T}HLTG1|=GW@Q-@654tLe4-@PhC9eYw+CPrgo8 zH}xk@PnJ&BIxa!T881bC{Kj3C zY(Q?)L0aH(FSA|)J9~okpuRF&KkX0ocIcHI-?dKc{vgvDT2qb_`yMA$E9K!2?()TL zJc0Az6%;4}#v0ki9+wK$wMwrnuK ziDGRcE4{1XLKiPV@myufln|ymt+6rKlU4z<+lxk%%l~4N506e#z;fF%?@}HL6shq^ z7ivp5lN3al_6Z60#?Jv0$|qA2Y`&Wv?eK{~slZ2c@ndQUS_KGB0_&g+lAw9((sc(p ztDj~=b@%RY5W{wu2BQs| z9)zEd+=^L;YpFQHP3Kp{m&HLq^+~kp(DQiNXcIPPVYu!uHvk{EmO;JJBU=u z`+M;b5Z)7e(Ks|<0M@RpjY4ZKNpnNiO1~dO5g%7iUV5~Lng$VVzbzz8yg5m&=h$T( zX?sMdM&(+O)VC&|z@nS7Z4K&rqC&EcjL!(Hy0zgRtYN=j*R-8_fFu3~r`|dvE&vpC){-G$jpw(S%1Z9`orVry_E3c;mQa&j>XDo(d zQ&n{@Ma;GZNt(iv?AGd-hXN;bWEff=K1aL~EIad%>aZM~;5s#B>+&(!8BKu(ApV_MuoD#wUw;+5iY>0I7WhR0F^r|2I#4$Y|{5}$*Sf=g>` zb2Ud>$l|l@BX|v3)zDy?7=Nb-*zDe1XA39~KlUCQAO8!a2*8=+d155;yS^Un zj_4-K7ukliO?Hn~a9EkWAay^t-sm5vFGR{o2Df?}UbjGjB7K?`wP{qPlLpB=vSVp? z@xQ2JIRMt;stn~^WX*ErdBFSrfVAeg>%Vt(7eTk!yEU@cj5>I*j?(V)la7Hcn!H}* zitB(EbXo)5=_wqt4n*^4loHHf_$Wivk2b$uS)oSeqD#`dF=BbmMoP?8O8D#BcS(}~ zwnZ#`)Lw4S3tbjoSgv(M0*C1Tdhd+p*yBO%?l0aRduDSRw1Yo8bx%7=6+L<9DZFI? zj0a~2QGo^H;nwc~?m0i4b*1IX{Kn_s3lI7VTd;t9va#U^! 
z6?H&%WQ$NJo^tU3zxl_y^h?vel@J`(sCu_a?syF?d_BjYoE(XgVI?kx%#jpsvvkt(c)YmZb@7RQSN2^^hto(zDzggwu<4932WB(uIxE(F|g&lLu-c z7K`740RQf9pp@yp`v=~mMwegNK~ku3Bb-3cJlP00BsvP`hLneI` z!h;fi&!r6G8L7urBLg=P4;Aw7mZ>}>zN>S$N&8@Y3>gkH<9}tw;^nM8* zUL|{W37Cn&mfv72hpuq0=eS4$Alg`OlSb09iH@|tAWyT|OV@Yf1gX^L1b~*ZP1Bbr ziL!hgrXgR~pQSpyKXm|askx}qDPpD=Pe8t~+!h)#qQ){LCFIcD1zDfmrgudU@FA3L z7fm?7hiDMzi1&w_34c`gtlaw9Sh(>RNL6Ye3U-R6+S&U}9q*KXLiS&?vQ z5c2%FY<93Ow-mWTL$Qw;0oYFgzUKv|tift?v(o&EDsTHe7W31jQc}KcUqOL2^d>Fn{Q7K01+_)|~*LixGbFbU~Z;e(; zN?X`3oy5Nfc0QSIzbfVYparo#mZYFf`T|@PNpCW}tJT;St%_z9X;P z9mmXmTn9uJA?3H10l#3Btr+p`^z}E$_q3VM*8`ZySi)W{*>jQoEYkgBTo00eO~TGE zKVtOh-xf~1=Na^I{+Nm3pF*uC@se4qosr-h7rPSnC$1A?`w__yus`=kMQ;OI|GJZo z#_w9%>>51x5WmH}1WA&qw~dpeDf^LdMPJbia`6Q_L9knbbQO$cJSeGS~iRcFEr zu`MwJ4AkaEPYF(zvBVcy_OPf>VL+1dpk{X98lFFHNVEK* z*<7F~n^)RghQ%L{;LzYX;MOibD8TD+i6P>)iEX`0;k~0lB1*BssSiO+@ zhxqFwiuAWA*y-(qT&&!>Db@1ClVH_9uKh^V5#AQ~Q&vNI6<7fe9SHtDh|4A~0dB%c ztxC*%bHxD^lfda8SY`_FQj6V9b^XV-faDA?ddV#I7?iMNOlkb7RCN!s)Lik_;U){6 z)HzjWPB*gQzoU=HUIU#9IVZuS>T{|n#HY#Z9#z|MoVPPbyi*LqjEPsV{eT z6xv2FNnTJ(4&0@`3uXncbN!Q6LQ+(+a560&j-2#*3xEQ4e_w>+Au}6ms*I0tjj342 z*r1a235 zOoocgB7MM}!qd&SJ-}Y0wH?+M;;p?B3v{WBT&X!%jH7-NJfQ zE{Mu&xh8MWP_Ik_Y2oOXuhckaaK=xxs>I$7yb4K6Y71X_-*?jhY^%IgPM>^CLnVAn zcUqm0F>R~nr6fZ_F%th+P+{1M@R!oe2u-fD(*R{Yed~|Ezm(b8dwjN=nWp=nhghP5 zyu&uouQ^HoFkR5_Vg76x)uL*sSNCdQV?>}W8GiQ1MHEqCme`KmXlI*Ey_gKDc7oOF zGEUarcln2e3kv3H@NyZob+7!1baZN%eZTg2udOmUnjAUTnctoG}JN z)ymwV{LxgNQ*rtsO1M2#~_N`|@r$qwytAWp`Ng_Xv#uwE+brTULhWZR?TV`DSjGQKmK6Sc}xlqqV^^ zly0ivSHuWwL%78-jozANF(01Vh!#pUW|2Q;gzN{sf8X>!Wn6S{;VJW5d~oxrwv?Gr z>M{J{hI=Nn961to%?M$(O;&ESzaEx8)gn}A=&Osz?Bd&3D+tWt zeiVyokuxGJN3vp=O<)e=Obj3)z#oflcHql4~qo zv)n;8saIMF->jA^YAccij1Bc32S`5V6iXO~5|a6Gi;|8$l;|cI({|xVwoX_IG5qao ztNXnA)exHQOOJRmqWT+boHD~+?D(d-Qenclj5DT(gHDs-;m>E2^2V`u?01CsynIS{ z#iP3%daxhGW)dnm?R;x#lu$IG42m#@Kh)y}9XANos)}e4wp^xtbW+WVk${c7pAjHE zO}n$`p>}P-=w_uQfSPh06rFsraPi-;~{@6}h^h$4Y)sWDU zzYrv5BKW51O~Q_+O0eW>kk_cmF2YR}cQ8YzdP4ablEJeu;a%1lG|;hd 
zmj^M6morjxKa`4Evsu+(Dc$YFr>@VZ6rWocw9A2v#m^9m4)cLX;R8J_N>DUwPl|+u ze&Ujt%z_Ni-`!WZko6D{l!$1i)=7;Ah@SkM)rZ~RX;^A0LeBt^$CKWM;f^;I8-!)tevXGd# zbU~^6+OnZoD{G&9$^Q(6$h0`hg!#ugu1N#`<#~dUQy&JsJJgV{EFzx_FW+-cx>IJ( z+cEW@(LFwtE)==9h;Cwbt_LXkP48^;H>}bKzsligM;K-O#)uqMj_9^O&Ei z{O)P&pAAeb?-olmQ!^`XLHU0!04x#`WcmX5Jx~Migd$I1a@=9SZ6i;;z(#=)BxH_< z24*$UCcLXVO(Op@63yky5n>WS?bjavZ|*JLbZ(t%UETOfRS_fIUIJ;hFopEm%M!C8 z=ICL4=ED+)e%YX^gP$@!@v~6R*&j!ZDZGH+xBu8#EDo?;0fZKRf&9Z~+rOXgv)_<% z>f~mdR`6=4Y(Xu}ss{jj`2kG1Hdjm8H^)v{5y41dRWk->Pf1XtgW>$IU6pbtil6dL zlPu*)tp$#Q?|&UEVc`@^HHMTeb#Y$K|2kTl&RR~k=FS$DWNh3Vy#FWi-~Uz4W_HL3 z?Ee$`59yP!bNz4BkSwK19XG{6!yKGDWnKdtiRV8xI{&6~0#evC2vRiZMgMtGgo&N9 ztA?B6quBz^k#eg+k|M9Aneqcd1%jO~1&v1`;y2$j!s~|L>#Kr5P#^ zieW|E;691uP8*Wb-hY5o;F(LQ*(=%5m*J`xf2%E1JKMeCYvDL|TFf*GYs}g3w%Yam z^>-bgl+~E@=BB@I=;#6}a3u``526xHP4-s1kiC!##@eK-vPK2=)m+ znC;B?@^m-`@GAw9seA)xh}2K@^jw#BK9|x1F%=6i_r?M$B}E-2V$3`Tjt-srzZ_J4 z2vB_2y%KGG6W1IncHP4B9T^ksEW(-!y4Is`G7j2YTwL4&n=9zdz`5`FzHre|mO zD=anKk#^7L1FHD9v$pX)dHd{c(+SN)dgJgk(fcw*T$bA?iZw~=g)J_ z`&&(&7R9MQ&F1%;i26+Ej$?8 zo&}|DIU=noS?E*G($r*BuSrFld#uy+_>7PnF7tl^m*6~AHf&N$gq|`vGu+)2M*%yL zDgO7O(@1@~MRkN~6&{=qvHYecR5neU#L~9D)A?#RuhM**I3=y(wq>8-Ghu^>|MneD zi?Egu_`jVHpiKlM=ii5aj_HYy)QEP=ZFN1Ea9b)rhm}gVRI=P?N^L(+m~y_l?o%zq zfttXi*d$Jfq=07voo@7f@9LqRdCG5+NqEYi8xXX3bQnWHIlZdbA=Y(2{YI&4ZpXs4 z)DD8Y`RV1||0dM4xwGN_XS@0Sa)%g=GbArE!N$sNv=0$jpq?%VDqGptXb2qWx&M7V z$Dd);UmU{701A8924P=j^IQk&q{bbzP2z=v5?Wa%NJR2l`aYg{a2u4Bo!3E2k66v` z#BoS#{m{Rm{-Ug^_jT6z{T5u~2IkBgtgmJe6NN$xu3n^)jS-1x1{-+VqI^sW1!8T<+=S_*_w(7b*=_B z9*X?F78JgQVUdBTZ#``zo50-rYssaR@}q@O)N>eYm&2lwT$2k!Q^Ve=DtUaZjs(83 z4KViGxmsVMQ154)k*H^sEpjCq8xd~?<~C-b3S$+J#uy1+I5R~%pc?_m5 zfT3;*#p49M%*wb=s}pqgO8owp4>e>!OI)SD!JyPF$S;5Bj$rO^xGb{vz%?n}-lwra zB=TpL$7&hJKEG9vD@VFx?~yhVTR$J#aWR$e`(UW7TZ*)%RMwb$0RcKn zaam+QPcJG=-z-fx|Le}|*TS{E02#fE!tgA>>CXuZ{zOuw&g?Uw{~E6*Dk@tJ5%Ry! 
z&}D?UL}}sj?Bfq(25QJ2L^6xj!_)mB`2@4T)^)nWj8H|M0g)1B*7`Z{$4BIoiyF^3 zUIaP4!qlW0@FNM5#@3$GAzc;rRt@r_a+K8!ByW1=$WHpGEY?7aF$4Euzhw= zvER!A_Dd&edvt43|K6ZfDpf6ER7Uv$QB1b(OOa(dO2buuyIPh3^5rI>J-bE?UjAHR z;qOmp{BDW~-#(U;HI@jnb=8F7g^hL^X)^KA*-_r#U>iGHJ0~U)l4COah5YC@LEF_n z%dxoSiy~a_EIe!;Y3!Mcf1v!MOx(Qe+XI_xMYtyZGf|!}&2?)(!(smL()dUoa9MxK z-bu^f3t^HZG!tQHLhlFw0`UMWpo_?kTov>&)6JMJ8 zWK&Jf51V6|ryVpp^Ie+}{@e&SpIumI!zd~6eOc4 zUSANx1Z&_s-)yM!tyCsh#6e{QF29c472^wL?&n`LhvWbSY~qijB9oL}T{r>XMF|iM zeFK`8TU%s^QcK#3!C!S}2dL+A|2A6xLoIpjj4RgP!)v(C4H1Kj0h%z}KmjuzJtRva zY{JxRT<`mLDeaCQ?>^Up3zMyh)1&8TnAsx;EBOKL{)#pyjONKZZIRJpG`{fx8OSGg zuFl+?=IKq7M!j4;HrZBfr_J=94(44NVY2I=a5Yf7VYM+V%(y1T<=MWKbIY8dbko_u z9b_+3(Ab|B?Ks*_f1)*Q;7|2nw5jlC0Lb~i)5=(zy?&zL54kYsJT$h1TA`+UafdSPoN&{k8&Z`n5$tsoiZjz37CUj}#h^ExX|w zGOk%yP#HsW5%Wt>MiIgSemw80u+h0pfZ7P(uW1J`kBmm4ypn4Y;FwvO@%GprI5%fB z?j}W-FHcS*&R@JQlbD<>q@*56mT!y}$x_sGuY9bPn{j+g9>!yQ9zIhcc1TEA?{q7yLmltx7U?(K;({@2iB(XC_lY-c^t0VwYhU?30_WoLqv?R35kiJI^wy z7#^iDeC1qasPHFV6Xno&1Ew}i#rMLVvTY`LTO*3=IG_6)3$WDsR)_E%aV-G&%kzP5 zKAO0I&u+UHRMwTk32pMmYnC5|OnZgj$vdt`P!+*-p~^b)omq4E{PLk`Uhfu%r!55U ztsld)6772$&n9fD#2@1-a`R7p(@6jqLjRuwzGS%Rj}~Pll$Sz8Yo|1Zt&_dvh$IT zWlc)(?|pN!hVALANS1qUfJ^DMtyVm4T(jX~`TJR^VKd>x3>F1K9A;)Ngt){5b)KKs zD4H7*s^!e-{+OkYa`+&k@4~zxt-17+ndURF{OW}22tfE7AB|0YM$=95v&{p)zBXif z&s7kGj!dL7AmKu9&U$E=W9I)c_LgyVEJ6EVaJS%YL4vzG0fM``y9D>a2~KdA;1WEz z6WpDk!QI{WkbCd@-*6U)>&~17CjiEme z|A9UuNpWaoB#-3l=`tU@g*(=z!{-@cp}ji>{sO1c=MTi=h0WJUQoXlB1;#W22>5~& zf6&gi5srE`-!T!6L90l?=Zhrn076#^G(UA|RHf5&mG&u1uS`onK&j`_I1PAFtDf9O zjy-=9H_w%lBvmh-A}|r;_7)uyoa8!5^2Mr=Jj^-^7{)|+*kr@z%%`Li%jkZAA0Rd< zG8KjIHFD~G*B0>P3=31TEA=IRuO5mVQSk$&?#EsXTYBgB3`jb05VVYRKKXG*31X%}~*0uf$Sn zcB2g0)2M&;L|``uRqGx<8{eGqq)`u}?pCiMc^uuyNGR*%evb$EP;JuCh7HUGJxLbW zh7-HSDsGsn_($cX5|`~LJb*kbT{msqlF<2S0#G>uRij0i1MF)yAIwsL`(4-H6G!TO zlr6R>`12>ev$x`MPA0e)M@>sVA!I8R3}|%^CZ?R1S4}eJbmcFtaA%IjGBt?H+oq1;2?r7V}s-#Q_St$p%ZG!S6xU=#=Lr}MQ z{?s&C_`Zp}^Eb3W6AKYW^7!YD^hgU*LXv3{7Dp!*w*KUAgR33EU1INBoCv;R^0aa; 
zYw>h60^=3Ml7JQ?WEz17loA2cLOL3}@R1*~%dA2}QL$tagR1e>B<7w4@qvPTeX(RW z{ih@XJMuQ9Rk%{h`&v}pi<~b%r0l=Ue}3BNexz(n!`JFss|~#^a?hheJ3T4G*pd}Qz_&Ea1!Vxy@ zJS!#y5#LG-8rtl=R^_L{q*#|DZM?5*eVy!}{fCK0LF&WB;zmW>2GVKd!iz5u%=R`k zXu+2Z1*PWiZz-qM3hg$OTcJ-pph@u=;}{8Vd80bKJ`9xpRw*2}iqbAVR}%0Jwe@Mu z68@bkV0r_XfA_%Pl^e4k_NLs{(ylo$DEx#_XhRDQrP{vbt4%^;dnU82e0@In ztEg_$lj*R|#WB|8aE}Sz5E3E#@MCbyth88*Nk82=g{PTKAdg1KNGXFozc%-OTr%KX6PmPy)!!& z5fK>z5;e-%@9^75NHe-xY|P;za96J-Khbo)&!F&y8Bn5>1}_X8u=4+vNAJh>q>DEe zQX+*CMFgjiSu^sv>i4l8$^o>SyG_^n|gMlpVIcf$un{#-krkQ<$P^d!1Hs>IN6 zmZCR?v4veVxe`L`IICWee!)k|31<)_P@lYJIf)<_zPs_kRt4|jqk!8?@|xvxZ1`BA z{)Xt>w@gx&9SG{G=AMcHLMB44i{2p52rRLQnqB30ysVWuzsuVSGWg zfpvj*=yz|;lCUAfQ8#~KDvgugyJ`4FcEmHxCRh1P!9!Qy6W4{4mmnl@&a&S7!Ts>4 zTjH1A7Gkn>f({aTMkhW90NM0{RE-q&7OqspQ3F6`3vIpld5*{}0F&gDUi;gl0Yc^v z%9-*2Dsg=)lg$m<7c@$h(CY&Oua&WuRmHjaHjhFt&eZ*9T$Bajx#YItc+9M9eesv>|uMU%SY2fl|oQZ#r1k#T15~ z?^M`)!rBw8v&ta%eJNE>@eF?mkdzlM-ZmCRP^~|egO&P{6&v`aJEG5_eVn}SW`4nL z)0}HSzMyUHqqL8WJTDdK^Lp2&%OhaI$XeAlk6r_(uf2&{iIvrXZrou;;t6f9y$osh z9JyMb+2lb(t~uy*2M98cqwkddF(BfP3-Gd%&8?W@58IRIKj69BOoq1UMK_Fi(Lfpd z2z+tzhcIvYuILkdmhubYhI2pjBVU5Mt-Z{*@aI1#`#HvoDYfafI|h6(2id7$KL-doYO-$d~)Rn)28vm};@tDVsVkRTc~>X43T7spLsD9je}8s-(@a zf*;~q;0Fh9djQ`e6I`BMxCIN|haitzxXXJF8IIS!FT=;CP76`inyrH%2vz3a@I{ZT zGqNmc_!EB3q~|@DaO5*&SEkO9mvc#)F)A7*Hgx5G!y6vQVBp)X9>I)lQrYv{arPHe ziOh#FN*hH}jf2pZkh-+=G^u$Q#T^o$81))1eL%Nn-y5N+v6DPm5Sc8hgC*YEf(m%3 zfSF?ZE>#MynX|vBX7av>gNGh|)w^^AeW0}rDm8Ir81$ph-o;jy6NYu#x=`G%Hd(am zk5KQY7bj{xHRnI27&O|Au-owS&i7PLPIlfiY@sqz0rla?bFWfW@mI7+W9_Qiubm~~ zGG(RevrP}Gu&~KT$p_op8xN&$HgUj$RzOsRVoXv9A7`;J@*Q@OK;5_~oHQ61&Gh#O zp^r-B0uj@c9FlX^We!WR2%i;kZ!w^CYrMpf4uaYRnY z<_*S0#;k|XS!6wqtHiTQU=Ara1whZDISRvo0tF zELW{d9{;efrOBspn2udz_>-a)|-PrX0oDMJO;^>xcxloARFO~?JC5UbRgv~)x`HS=tq<+~hIA#u# zPWXVBKvIO%CF?tNF>S#-4@+P%K3y;SB|5NBX|X7J&W}}g;MhU@wFHJHP-Olia;+#t z72AEiih)Cjr@K4xaa z75SmzlMtLq(~7MZCU;NaArHVe^8PX^U5RY0`{WF<#Z|vR^fFogMIjT2XyCTW{I zQ7{?2cJHRZ+Yh(>LUBIZo@jjZOe?2`^vJ?j#_kgOR95~duIHhH$V?SdG-3hU8+{7X 
zbakVNwrJy*mqW+>cxO(!RhgE7&?v^O=%{cQ zEEHuZj&=LNVXsRIytteC^z($#_ksEMQ}CWb@X}~q(ikC2OD?inB_pX=cPY-&*`~2A zv^i2e!$#>sq!yNT5$}*8T5H9GQX8L|h2O!*8aBYe3ftz6g1^_KPO-)LP&2W_^m)^! zNlXZ#^r0R$hx4m#IDFqC9qN%T73#_baf>rU%-gnk?XvYE7Qo8=zjvPtGDoB7n!W_z zLO&s2Lv!BCg9oV1Nu_s1x`traFrSHYK#FSJ9s3}sD>^o?$gwH;K@)P8#f3&1cz2l; z)@U799?n|z0C#Ji1;08j6Cb>PCh2iso?Z{JZrXb!4t3nCs`ankltuwx!RkfOn=vf;Sg5k4if$exl^_rc9viLe$19R+nNg`(IZe064Pl< z7oOW?8@xC^;@F0r6k(g0nC-c>X8wS%{W_#`tazX>FnT|fh@1`q%^z}NhFm~jH@&_K zEx7e%ofqL4(Gv+`Al^>f!hFhpPg&1-S%fztOH_ON`{A5t;-~o^a1h9L=n{@vmC@qT zl5pYy{?%^YDCc=yLUO)Au380?26UjaXCAYj>6!DV`pB-+ob4w0{%>LHH$adpR^N=W z?`#p!K0{>D5w`h)bV)tKXI(o!gbwe*rW z9wwLy=*YRmV`p8#M$8xKzmq6qdmq_*nv=N#WkRuoCV0REja0`6nhy(AN1!X#MWwOU z2*NkvenUFEd4$P6iP+QuahyM&Po9dw=u*XlyQbH#G-;!d1B=MlsKREM>342;SWH>2 zd06Z~2Lfg)N}Et~G)}w;tzhEZ%%2u@^UZ)ePhb6%m~rQyeVopr)M&^Bcox4)7t9A} z+y<+{Z-z3UOY&L9ov?31!Xi`{|Lg_t@d_&NMf9aSY#NOgXOBy_70WwN!#_2QS%fB^ zRzHZrKP^lbky4jJAOBYs4#XC5tfGaimcyiw`!~Wwtfi&r!Tj?R)V`o^M$xR^RXD)b z$PFcxWZ_A`)PM~Ne+ZQBfH3xJSHQG{nFmPlB8zOubr)5}Oh7h!S0b_wZoBTDp_)>3_Lb zz1#UkO*-*0aEGxT&iXvES80Z8a#9bdxI$pTz$*G;>_tCKQ^$ng&!7j{YcccZ3NNw$ z&ejC3Tyl-`fI$YYke*H)^|iq93r7`Cl64WfvvB#~ukY-Zv%1)m*mKoN3|g63ZA*ze)st_4HlQDk+dRGk6 zPHlTDgNqqB(pa-;Xp{a=!D6Rm{%Crh*tm!htG(wcv~9e<_43@9MGb8~=Zq&(ylc!6 zJ4GfkZtXYnim%q0F^$5DIHm{v2V2!)vgNax#uY;iH#>u;5TVu;!$qoc)VpSj$(Di8 znZ~QxX}Q$?rIuYsYiLW?XpL#4v4^EZ(!x@7adMzW&x7cC*m>5ZIATCfXQSJ|C zYLERHbRq5nvSJZ&^xFq=2ZN8rc4R6-6&p2$6fX7n^G+PaA|*u-q<>rAhFfod4hXvu z))ZJA8s$woJo+YETj7sJJ2DKBm6;!P_>IqhE$ZBMd#hV&MUrf!Dq2{uYn=^#a%MS4 zvYtpIu5}4R00W4V3#y7I7l_n*HGXt$99Xi~_czz#xkz%-ty|b49Vo@Ge)@XTeDYoF zRD?ILxJ$&e%O+?oh-f z9ZpJFSIQt%TJ7uZurX+Zyaq+6`4Z2T`IcAxM(4mdP03gvt@b2#t_l=h=Uv40A$-ua z&*VQFl1MPbx(A(qIL_!0ft$n~5~aV_v|O)WFe4 zs2KPY(0UBLKw}9Lf5E#Fmf&+85NP9ie9ejO%*3aWkoQ~id(Rd!d?tMD*+tg%4jHUM z`k`Lu^oOOUnc?C%ZMTW7YvkvkplxBwm#sv-4J}%EJzImH**9jwzmwlF-SYe~x)^VQ z89d6iMv2=O+-FN!)@oclA?L)N>Mwv;)YSp9spQ~N=!|qm$wP0h!ASjV>>fA3&FCL_ z$S1eRB9j|E&YRi3X#3Ir{>dn5aiWL{WzA@(DOpvqx=M~X$H0SaSv7Y<^rZ^t7# 
z_nsu9xL4~7knCC zv9z@>dp^qGE!c1`ZuIVTmF&HjK}%hyH7jW(qQT15NZe>8qTxX6H`;yiCjRDA(Yy5B zpuL)iHxJj5ZIHVRCNEXwi)8PUOtn7HZX~Z%&8IRVQvJF2K|%3XwRH2yje_DNMM@$u z>+puHBGZ~iOy=ZgN7>LsAILnkExb6LX+zU|F36^k5A)6%xHRyNNU{u}iMijuZth8a z;VwtS?7!6fjY8gUxWyIubQX2JfE)E;-cf_QkZ2TRD=kiE*wD0?dVrmxDXC`&P*9Au zmU5*3ho8W1|Hc;^jWFBhip&W8cFE+JH6+j*ssF~>*70PMj#ix?{&YmcaKRQold&YGwC3HEi~NWJOV|*$%qVLc-3}J)WwgtE zyT0TFt?uo>Ry?>Tmq6!TY(PS*xf$I)dZfJY=Rq%vzBqyIyOiLB)*>^yk4CAc*V6l0 zX}Km?+l*;x#PNF3;=nrLVGS!mRMnP3ZGr(8xS<$19rp|(1ypyqxQ1gft`vOG>^nnb zcJe_QBTW~m5oC5z$?Bk(LNo3r*6y{kOLhjDRlTK-j~ zXi0=t;&HMh<(5IFp=r7KUrnbmnRr2?_ac8{V;_Hc2x|hMx?7EPT|@HTbnCGYCH_ZQ zNMjy$_{}f_>KsJSn@?hNEfs#fxn|V&Pjlm`dRR^fK_30hM~D*S=A9Hm}2APDAW~`h$9k=vj1Wy~~7?V@BX*6L{4%zkf!t1D;-7p6Ly` zkYCa8vhy;oLWb_@I$p1JxIbQsc9BAEC(xllB{LS^-W;-a$@4zyLs5(_iguCdJggqJ z$u~J6tp8dRG03V$<_-LIUB^qcZ<}y#ss4J{RFf7R8lVh&Kn0f#=es!h>R7QlsgD9B zh8Uq2qKkrgc!}$ALXcsg4IqxN&l#Jub-A`XC9MkJijmMRjJlD4QrHYC{T`Wuyq6{i z^Jtql1(bpWWehIJRH9<4hXI-%c6b@?+8868Z{!EEPB7D1UaCBJJ1G2niV7Nw&A z9Zv+oH+86{D`a0+P)!Nva&41x1A$UgPbS*&9|7q05drsoo=1`5akRYUN1h!fh{6nQ zKG}q7A>knUt-!zZ9iM;b?VReTjN1gD-tRkNwHM12T<7)F(kb@k{kOJQ{e)G}q!S;zGde z<4@z5070{)|0%HFwycn8YukyLYDv_S-tb#}q8prj8!|vgEUt{ZJQ5?~tMO!m%bRHY z^e`jU6y(~QXJZy0n$miPzMsO4kPWE*#h~g~_Sum=H1V4e=2mH%&f9os{WR8kZ?O^R zXmA3V2L3DV-?k2rY0KKFn94|!eOqMTRNx)=8FsO3 zJ3<%HAlBt=%~Q9N@Zc+Yfx8Gcq4GJuknyT(!%# zDX(S32OzD=<6rBK&q7`08ej%A`wCzji6}6%?uZJb^;qa-@L&F;UN77JT@%rWJ8+^Q zZ?33lzXKkckAX6KdzCX=Lq1b+ zFI4W-vL#Lrt3n=S2;k;)ha2O!6YZt@T#yaYw*l#wT$y>dn;VxG16ayk%Umd%#HZ=< z%s8?=XzkKGyL-DX;Cxj357CsCwHw%FgwDp?j#AAjn`5j2 zUZ2hmH6P5nN{xc^h$9&PX?G<3@y zuGDz2qo@!u*+CN?tk1tKiu~LuCRPu+3Y){YTl*nCilqvgbetscBM^ z@_Q7U7~h~r!oUXO)wb)9@7VxPo5bl$Vl!w{CUsdbs_4_{{v*=lR=oA=Mb`Va&~3L* zKJLfFH|XD=vy#`)eA>0Gn1TNF<8(K`ekqDrnyVrH_$GISw&oGM9>(U;r@p~>{@iuc z?9fR&LUV<^Pm8kcdFx(JFv(q?`^KIOx?Npad+pucZSN-j_YJJhmrOu{!gKuL^lNF! 
z4||9!uLVFtW&Pl}l)~PQDjo0lj%$4r{R#BZTB}N46C?TKJbmEgopkqee45l%d$hNx z$};*w$3n>`{*C_crBa8F;49?5bfrdka!F41o#5xLV=X~0KE`R~=Z`jTPn|as zZChOzY~OIMgWnYBY?DHn(Ioq*Swm2Wr^boThQekT zxB_4G;H9Dhux$=dEN7ciE|(md4oP*-W+Le!E`Op#rQv8tuBo^IS%WUmf5g8$hs!Ed z!t0J;!jt2fPOAKB8VQ{n&SuCf9IhH)_U)ZfXwYYdE_G-S>k&;KFILO*ZoesVy)Lqv zab(!c&3jbd++70||JV@E#>=%p`2otRK5Rh-UFlAyIC)kgl&9871Sbv#xen1+j) z{Tol=)2C_&`*8PQhnlXnL0B8yUh)y%qc3-ro^RO%o zlnH0)?Ov-X_T)nq?YTJNRIwqC^xl52mc0!% zNZciFd1I$ob#xGF*fgw?2;qvN%KfKU0|vFK?J@o){LtOcb@}aT9y&exJ9Yh+nJwgo zM#I6h-fX;%CY=zUdV})=KQ~fE6$vevg@dYKz@7eN1!0=~y@P5x0JiWU`4T+n>!ur6 z-$ymYHf5A{wX!1EP8TQzorcoc%izQCUNh8FeFvT3=d!>($l_)gB>BwL9{8)W6GM5X z*&Zb86@fNpX#LoO*vTyJzN}4z6HhQ_mD@BUt=W|_IN1h!bhd@uASeSepfhHpf)n7%5zdb#o8T(wYBR46UQ&HRS+whs2e1k&*a z&<69q_-MVMd2v)4E*hns?3u6<^42@F=Fo&mEnp>Ox*CL-imS1EQNN0M+u*oIf#fu7 zfl;K*U`?l|a=*A8sI;b$&(~G{-09esdNFl7^&OoJ-h`?XAU=Eac7luKw;!!cBg`hX=gvIFlOT)gb(?S1rIBbgef%HDT`O8f9J6CGvgi~ zc?_6*#PUz76vNC&iZMj*c}KIG?ET;-p{-B7CLiEu9g{n5EO>l9C^-4gAb|&YFe&$E zHcW5OV3=RFRWEJqT!MyF4x0=3qayd27O>nl<@?(=o$v!{)5s)X`L}UpSL!tPD^4J2UneYV{jS>l#4MXykFT zo?UNIe7)Fqr<`!6|6=y11t<)RP~F3>7Yw0R^VILnG{rI4LgG&T7TsYq#yXX03Tq69 zu>%gPttMupy^nAKC%-`2*=8h#AI8p5eEDP1VFG;3Uo2RIWoi>!J9PLE>zu!juw%g_ zMRp~wuj}FmEB=D#U2~`0hFoQ_%mKPw^>ib-Ueyn3WcUCdv8F(*bl`m;&MUYpc(m!v~R^LnA~mHKZ?HOAG8FI@v*BvS$MZF3T( z_3$|l?WjC5{ZAWStN`Q>ti7rvDyU7`g?>Csp0B&c376szzBNXIM}vJeA>WX>tp*J8 z&5`1bXTiT~mDxWe6=6?3 z=}lc<=!{{*Fh|d8T=QeK|1fH!xNYrXHH++eK+#t=SKV-vtF#_C<$>gYz~7#f`ed8WQ9U?7LApp!A?Y2R`qK# z8~;x#06B!_Y*4YCP>O1+26#7}&F3h#_TgJ1hmvm&Y7i%MlEcCkWYI+Gzdf zgC&$(xb5q^l?LLD=a-K5=a+2YaepZWc~LynueGjln{c4%SLR&rO6swnxAun-RI2C9!V7 z5jAG^sb}i9Go`aa?xk%mn+6tU^D5>dBUv3brL&$rf~kOPV8OA0C6&fp{2tv^%hc}# zixiVuOBstIk;5es7<8Tz1rO~#G7*?yptj@`o^YhdvT9jQFq*568(yLojVB#8Lt@k{ zPlh_U7T4d2A))Jcjspid;%G1n7e2aj_#H004zN-jLk_1b{n(lCd5hwicX>`FiMd(@ z#b&fOwk3#jG{Ni<5e)@3Ec8ez7t4_RK3_(h*rE$XYs-2K*(%JZN%t}BWtd7R{PuN9 znRL*jttmvNb!d9n*BjLUGPcoz8YK{s$JO1Y_!|;?_lCZE2tAsN^l)L%5G6>3EM=^K 
zMMwEBOQrNbq;sGi?VKGg*v_@XjVCd^j$8kUDKk@HQ&x;DaZ@YIfG2s#K^9#nVuCGk z5vPJZ+L|3?6U1>DZik?LME+x$mG^~>FbgoWhMcD7cBhL{m5bWciu%j@@kOT$Ep0~$ zx5DKwPlblRLLQ>6SdzXb8VT#?KUuY6$)8NE4uyrq8|a!}D^tk+N$6 z5Xgp_El}yK7S$6=P!z@f^d6|p7N^37I30R&r$5uAL`r(7moa3qEFM^5(WYZXNw`=> znDwDhxJ?v#cBeS=U|~~W!%LJ4frgg_j&)OsDErJ}Bu{YD z-S0k_3KY?v+`%)%FL%4y0gwC7^{T*%{nUU5vwMfh^caHAuIZ0UvN4ZAqYl2-TGQXu z8beL}T)We|%jX$XnN9(hL74thgtsL!oOLPVHkH1o1G&cJ?p4Th0BMsL&M9-J( ziRb$ra(?#G?-AY23a2gm3~F~1t%iBqTxeQ3-WJ$PtCFe8fg^+Kxn`W=M-~Ad(S4K7 zcUk0kiOa((gff&rE)SMiu)huB<9=6=H1*}Xex-{qJf>D~OdZL#%-PesZx%LeV4msQ z$ZBA|vQTj(Ru*(+_9hx_!C6h>PL)Zk-)&(L%(?;aM!j(6mRM2+r-gpgBf2y&&8&1Q z@J4v9b}Nv59$c-iB-X|eh>-&-npd%g93mT-g6^rXhy32aJ{2ZUiu|;}|KY(zc)#&9 z-gto&%5PX_FqnmIlOB-*JV}Qdg7r;gA{i{VGtH*qiXgi3kI62PY>;7C%^J6(Lld~f zzt}Msam*b-G%A(?R3amRe`RkK_y{YcSsUlT&agN>Sis(}7_G+SbYZ?Xj^;1k0@cxT ztPng>X;3}J{a2Lwzud%z^LZLWNsan65Z7kJh%H&#t&Q_R1mvGT>pDH+`!{B1>KPVv z*k9Xp2uVfLz(2)!(#Aq5@tx)euQoja4kmBBQGA>aLP^*`@43)dJ4nE4T1noRH&Ae~ z+fX3?FAO|Rd`#RJ2Tt^2yG63y7%lwh;@Y$rgFmRQyJ6swNHf&2aBU`o&@jTA+uHC* zZ^UvQ=|EFAhJp6NSRYx^#9&`XSpbbx+BJzG5k$Pj0L_g?1t;S?0(8H|-{^qeBN#;2 zG*OwcJk(wq>!V8gSSk_}EO4r20g6rKOhlDP7$jJpVQ@_J5>yUvp|Yl>{j_WI&e*u6dgL&N}e|{@hApE(Q8yUzSB+g z!a9BabQk<7%o2c1(BtR?+KmNaY-bHl2rS5%MjY!xn+sMDmQ@SVepGEx| zGW8cBFM-ksIPr0Iljn>muw=*96Rz2)Pc2i>YVt)il>;vdLva5|`q$_D zzsQyYRGbJgnt7?6i8HBN+7(vks>n<%2ZjA=+M&1AVQEE@RH$1-Ci1IE-?BfD^<_yC zB-5ehO?XQ{#l4g76Fw=0M^d65fO$`NYynk+>JSfpfE4X`(;gsXdpV z(^H5_h}GaYr7l``zS91_Hy%?@bL zgsaZu>uLo0fW{J-`HnjYl#=zO-uFAN4LDD&>-a59&NpovebJ8${= zCJ1)Ob3`LIXur}4`rBYX6Ii_eRobv0fY}+Ymb*x4%1?_M=f+jN9OP_Nva?(FHX)ydhESXY=#pxPsPWt35sVeKchqt(jKU13?_+p z-v=iCkVBC)#gO_u+DDF%9(L3RND=%9$FInflgvG^Q2j8&L6?lBwR$i!VOm$peAPUA zzBE~H_QHB&*s|zgTeg!`8kxlIRTAYnOPmr_pT~Ka7q|}4Xty1_I@j46*V&fsqDU<+ zK8)i@5F4ZZw7YJNDa^CI6wo5RP7~a-th;7l@0$#b`DDCuFq>nPXB(pj9B8)@snj0n zmJaUqz~s%%P7f|2zq6t^wOCz@(z%f&^puL$*(b_-G)UsY}UK$o9;(uSGiEU zi&B0yna)LOHK*w4Wh(=io&$5xH@z^nJ|o*Wk=LMmCrEo|#0N{rS<1QM<04Dw@iNtU 
z_+q=Nm@hWslWj;eXGEj~AQ?-NhpILbl~=`2Fr=3fFh@xf*~;$%RW{9fk7v7bzmI<8 zz0>f%D?t*$a`NIwdR<;2*bEWBibJ0%`dG$mM!&VSa{p;}i_HO`z97k09{;`BIrr!C zAY@tOmUw5YbYoS$zbtjlGTQsl*vFP2n#XfSc*TNeftza!kzqZ8u|n@enmI&Wy|QxE z+v$2|e2qc9lK4ch&FAi>XM1xNMKB7Uhvtb&r_Fm8op(sz_kJ9xauGR9&bD5T*2=s| z#rDxMJ5-1HAu$7Zu($C#ey`9;cAqKFxL;MvyfH1haO0&v*Yr~bpTjl_XYgsrGjrT& z`N5<6US?|T{*O6Op8g&ly%i+F8_h+ ziUBjm>x0#UhdZZ+Xl)%rczky3T%-OM_WDoDJ=WsP4OG<|Y3Q?zM7!Ska{Tt`t!g(( zA?)?Pl*{g|k_)-U?0ImUi^lu8?H3abb=d9-ca;GaPx_p)B5v2w^m5;`ed!96cgls+ znPg44Q`O3?TQ;1@ZD$VgM!DggI#*Rt)$V#)>JLgQi@8H+=>`YcmYj|uk-^6eKK0M31 zpTqTOi%!Yvq*HU+Ym=+y=3d$S)~B_bsQ1{&SPQAUA)L40a5K$E(!ZGi72|^sg9Y58 zPRrSDk+pZHEsy#D05tJ=k8u;YOs%$@EX@x*Z8Sfop0{?dG@s(;9*48ng&2PFtOPxF zS0Uuz<`s48S?8-{dY*`CRGI1q< zz@^&W+TO8jC*R}yv@`;YXz*vZH$I)8^=ywT7(VbexOm+pM3W56{S2`7jV0EJAeB88o#!-tOGQ9-EQ?Aq5zy7I$1L-kLOmWCzXPF zK5myM(`_i*J6+t*Z8|&u|2711Q3( zL$mj>E#wlFRt2VzeZ}og>5BXuNEluK%{wDQ=*n{>bOY=Y(L9wlQtAl*dR92zCEH4z`D4K;@VD&J}zC4kiHjm4o;XgOuq%sR7le-G7OsB2+ zFdphN6U4Ch4vr{L-VA@7z_JnUbUJszGyXXVV4pSljR#90WwE<2{IP_1Qvv?d`KPns zd?w%p#Q3*$=}z9b@dsHz{j7;ao;_fGvxF$|oh#G=rh$5FGx!mF6>Vq5yOw|%P}@%z z%(Wn`b4&y5pMF2xYF4#3;a}~e$iz_ItP}Uv&3j~Cle6a9Gw4AdmdKOu2}DdFw>=k$3+_yAeSEw(vu2GsMFuiJlxvK^ z`pt?`Y3!F&|6LZ9(&VpHXv?F7rs#qBly5bZ7yPs5(YmV@6F;ll22N{Lf?I{~O;!43>TPNW$K>r1SIY$?A#cKFB11bSC< zxD=crrDNcgvwgPZ?kzYAge+nE?86PUqmnXZ5sT*(K2pN+ku{h{nVKzFN|~BH_?|UX zQzOS}mp1|pQLLzOH)t>6R(Z`yBmw<<2C8Yh2`7h>ZNkM&ipaN*Dyc@Pa!&jT4)6eG zk%A*Ud)%5Kw&|)Nwhj(Qx>GQ5+G2N=4zELGqwF@mknU8aM^^q!%!$bYRUg(vXJ(H~-wfTsR zvkhuOdB{vst3-+XrVcY6|3Hfg(`Ekb}R@S5Sb*EKoOd+9~?T5PYCS z%}C+I{~;a9Rrp598ooC%KeZ-}yNr#PnU5TNd*J?;gQ&-j+a8}}ItVw4BJH{@ zK$Rqd@VkWF*h9K11V@2UdmyDoniW%Sj0)6{hoaQRkx!jOElg;oNp8(nfK%jKUhOEwQ447K_TiMZd?ZSVZDD4RoAZ z+(6J2^Nl?6^jJFt z?u)A>#d<4zxv=ve)r+DR0IIe|KRPd7Ro(N<{JOR|W<<8&%a?&wSeO)LjPkLC4*IZI zpZLA`W;lt@t#{W5t!BWRP&^u*YU%U;#o9LpcNTnYPHawW+qSKVCf3BZF*ljmwkEdi zWMbnN+qNg%`Mz~P%iw~$ zhnJFbJ3QEytA*9TuSl0#!{AuW+&3$|+8f)212ScmO4q2yHR)BefLlT8G0*0?JJLnE 
zTaGy^yBy*0CT(9q1;RgFl4KS~G||rxtRblWT%NBK{HxpWuP8`TQ8NGe{lf-l=l!p+ zak8;-vokCF6NPZKWF=u`=Vs^mzgPm#{b01zhw&b{kIv`DRV6jn2Qe8Ujm7IEG%}1~ zv>S{dX!AuWNft{BOX0$4xw|rR@T0XgqSLIML6Il##ZUN(9jv3ADjyzGEd`beT~8l> zuRhOVuDrJXnjiAB11{!=h;XeGVI#jH!~b21y_2_ajeL$72(>^T4Ewbn?&Tqn){r}} z=AQq%ofgCfqyH|lfJ_$PTVAT8wW92*dxMEp0gB892$%G#4N@_5K6Y~pF2YeKj6 zkkJoEWlvHfe#+~C-Wt@>uJY7;An{FlenDM>_~qs?6_pwM1_)W~L-X=!@slAZdZ^TG zHTC*7y>C<~Osd1u@YCu0!BWXqwz%;Mu0;PYEDa`y;uyAOv_Os^l)Ec(oeun#SM1R`*fGJY z{=)~+v`TG(IbJENfUsY+q_GfsF? z#??xz4A~_GV&%`6f5b9$1yHrJpmX;>>@q$PYpA}_DS$ooEa}~S_94xRS_HQwp;fde zbnGji^UM{88m}Gef!cx7-Q!%!7XGy5&PC%OfhhY=^s8a=8x9b0q}@QS+rJ&on(c1C zPtWx!^HV2^n9;j6<5Oaldti4H7`S&}dHq?EuHc&eLBS|sZ-Aulq5hUq01{q{DRa#} z;VA!<1v(F;Pb0f}2oV0V3a6HIx%_h|+Ij^wJk9=(K)lDb$3zi$mLc%lV%=j2j)>7M ziE1Zwn~rw9aOmC=l@qM~q{S=G{WzQc4uqS3Ecy_BcVJ2;o+j>0dl&t+!VyFlQ~m@o z`9l8m3QXRI(wu2+)%iWVGHL2`DxCdfWE#^0KBUJXzkfRGt3BT2t~jt>gcq2cm8tu7 zIv-X^zcy6~XXh3l?oF?ljHm6j>dn~n7#O7VX&K(z93-7+s>SZTq^c0VukIbaXIbge zY;BJw)%eNmHM-;36fzfeua>AYm&bIH589SGX=p5dLwDvZg5!Fv3 zA{bjJyZ|qM!cs6L@N$222rrIrYcl%4gPK+Gri9rGmfX@=sp^(&Wx1$ux2Y z5aIf7aXhBkPQUUbJgQVFx~h*ulSlawGddUmn(9F*Xb|WyCc2Y zi0U=jI4je0$_!9uTV98ozId#Ct@VfcPw=$<lWu<_ToO`yXnZ&BL(r#kX&|@ z*PFZIH*NN77xRD3fdA(egymd^T}?n<=>Kr^-+lJj=s*mD_xp2@5FR7nL;p*NAu%`w z&WM^g@Y0YJZVH5z5%(cXz*6UjYc`zDdR(sl?_T{kXTX2M|KQI}zRQ;eUDSWQI`>F_ zdvbC8O%2>8FUKUY%<%=2o^i2--^J(1FtH`RHP*I6E+i((-;fGNEMv7uRB`9Kc_#pD0-CfDer&IqL%;~)S%7g zaOegE)f?$Dq#wc-WyYf|yOn04OH50mgb3`yy9S9BUC)KfE&4FTupZik$l%k~Rf+^F zu}=$V&l-@VyB{CCfy-Co&K@GevrXOCN4SsaFZiL43dL>e(vir+dS5nR?cWH=E6?^G zO=a8?X?*(9Z|sCb;{ct%vLc!u`KvV6S> z^hn7D9_cLRYE=FKdwBFMJf|MP$A+=E#kXAB;#uZQp1Jxx4jj9|hvzE6RrR$K$Fm|_ z!7tr0r$4$n^e< z|DC_6n}z;_XgKE!u$5|2>W2SppM)3IQHMDak<5bO3oK7`+gB`adp%$taxdV3Xe;CDN$BH5`Mz~#s$ZJyPgZ3ysp?6IsyzkPM{UfRkWf}Ti&=Xty z6WI;=MMXX-p$}HHE$Jf)b)DYq3%Mz^Crcbuw3?P27Yq2yPk?2JZxHI8rC5@N9Nq2T z`KE24U@oae$v>7ClhnEbz`fB>_=`Q*lo;+m0WXvgJw~f9ecs2Zy!t$@}}o#D4`&Q0ae7QrV`dtn~rWcNV)2w>rI>uByOE 
zn^8*#FClC0%$jvR!WX2yA15#+RSFgtM#{G%r($5e(%>yH_Zw{2`s7Y|EU!s=)xmh1 zbQerqZde@4hHw?(hs#i?s;TrF#Hpt`JVn6!1hhC>=>s;Rl0v&p!J!Adx7L4N%#?Mt zcuaYcT3QP76z_=`5%V6HaAu2DWC}a0G{4c8Rz4~*K4R?as;P}55+C5!b&7gz8j+{? zb=$IM+c}IcZ+I(p1RrYfke3rKB(n<|bfG8fJp5W*MB2nDX^AD@TC+HSkv(>f=I)O- z+;-zbwFSgRckp3rcqeyjTP~?NHtQU&W^?3vsA(qaa|@I71q2jEG&5$n8d2Zn6_f3F zJ2oU;;;)-Ae0c0i+BAsU*s{$Gs>BY@HF)9d(?R*p$eJ~dg=5m+m3KZ3bW->2(rvC% z17^9qLD$i~9h%>2(saL5x78Of#OF@$X`J4)-vXq1$sgTVjYuOBf5+fmjk^g#jAOY$ zv)j=g`_5oI=eM{MxreOf&lO&Kcb)1g2Q>$i3?r!*_1KLX`z{b>4AONL*hrV2C3ayO z?RG3kd2B1l4#q@8a^9j9CGG#nXKFP}elD&GE~?j5Aogn>Ue70umh9JhzIFz#72$bs zSOa+u!$}&!X48Qnv2&uy^3>51sL(D{Z0SoHQ?2XPs6yZTwt{n_AC%t_J#JFtPD^=Y zj;*}*OjDRD4x<~2Q;O=}#S$w&xY9wLq}j&W+dS)3-P6-t(emTa4|Uk{`ySTYDx#Ag z&@TmqJ=qxq2%pg^OOaCZ>kUspgI0ih% zq_kDpF=^U}i%M?5Rz_+_tGHE|WLP9ESo(%rG`SUZdN3%WewA}UlPg%~3gu~Nour(~ z(zslqG$gc0@%GJ_xutU+U-c%NZa@e{PTg53)Z{b!!}OI+6%L(L%Il1&Ggc=`FL8%u z?)$|rM8mLKxN_^IWs4pBB7AP=oh(3Y!G4z@mCo)LbC4gSUVBTAE37jE+Y!^B$e<{N zX08N?b9nX+F}=KMSVqE-t2`WY0;`w9NM7XITcJKCZ(bE^U{&77Zp zOH6$CMW}I%qN+K>iN=DF^z0|#Y}<5{uD(B#0gzt<3x#pm4JqISY}-znHF#>#Oa?ti z=sc@V*b2M43JT^EUmD#Ewdoe;6n?MoA-U}ym@DDOI-5|^Z4;f@m6a!5khDlIA}P_A z)AhHjgQ*mb;77pbeZbPFSk43SXGqEAEK78RAwl@M((>so6SNMe>4xT&nvtnPyi%Tl zbG*g8`kOQjm7I&LBg?+Ei~Vz4GVqdXmKmXf8Wc_qY($0Iu#KH#0hQ(O{L}GPTH|l^ zunqMk&_3O?T|**i3%RoOy52gTrfJ-b!r;5Qr`I3O)2t;>smV@z(enT_0JgriizFR6 z$B$Ih11<%lEm_a_yy;&&mX9*RGI*iBs})nF#z)o%=I=h~7d>n`6UBtI))zuXz zi!}@wrl+`N!B=9ek^{Oid!7H_-c1^i7$(Qu*sYqD^!?dhyg}|1ZRCI>aCK$KRrhgA zX9%YH#`##E%Sge8nNVXz?pa}0C@$3(l3TWhmqnupV_udazCPo$)ja{Zkjsh|M!I)LSRkvX}-%p6S zKC?R`v!sIu+ELR4U@1EG?9#1kXc{!PE$Jcz%gQt=rE0sdj?|@#d@&UcJ`@n~LCCpC=n?{m{L|yC?6lNwHO|Rje}CN~84q8aohBwJosu(w+m(bM7g(u5ZDjT<< zk0)uBOEjR%Ct0fQno{B=X;lu8QDV`l)C|bVF(@1V$(IMo*Sg zr(u;2Uy++oH7-VHPomMxSAdH}r>0}ohigGEl2ZWG@+HEJDU0b?CBsMM6jY7#(Z43) z)JVjm?NaUeI$?em}mq=8jnIUq0BSvr6qCr~rsA}3HWz%F;LvTH?|qq-|gnWMbxL`kH! 
zt3pYnvipOQNOjjiPM~xELhf8+_b270+O7fRrOK{7<)x}|D*A=ox%#dWr8`ZXez+YV z_nc&@_NQWiP|o)It~+HjO`Uf5TGDd`dJCOXBsw>}QzAMyol_wC61`J4dS+6qMz~E< zt4?@DQmbk>eNwAl_;^yQdboU2t5$eqQmb-!4~RwA04_RP5U&=IitKFZnehS(qC>LC#oZ@hvR* zL6!!q*`zN%w@dqLxSV1Xe^;PS*LYSP@=tkHy%iZxOJU=5*+f_3WaX^CGc}ZBDh;q= z1(7k;$Cc%rP#Cs=&wv;I1r*#A?$WINHf}{Zv&zlOjWfp8N7ef{Lan%#G6&1j71-0p z(#O&S6sXx0Eu7ht*%VFae$bg`jBx~8v4JRx!xg@!4ZT)M{w6DS5<2so(JB_pbptQ$ zwaA7YvlufOJLqK9n;uN+e~@C&?&XM(k1@sTTL@)0V@EQ<8%rAO143Ak4rv0@pxKbN zjnWm~+YE$@5vi!v42y*ny3)O!j$Mm6VpbAI`{o11OW0Q0a-z%l)!@~*sp!?}nZ4Lb z>D3aLz2Hj`)X4b1rtPla4r7dC^m}0yTe?fJO0r2h8t|y$sL>n+vp}&!{cTr*O83)` z6s4ld&c-W&{EAD#2c#k6N)CuBy^q{OfWW3+u}iuwUSdvki{HBoRcE-n3$1AbsNliAG1wScmMwp|KcB6@pVrK7 zcSm{X@7q0q` zrd1ypEjcCu73tNxo%HF|M(tTAY}JmmB}_ls(@w~yI^qi6f=|Gw_;XJLie1vV*s}X6 zrw`TXLD~uyY<5OESMsHhRdVM5wACI+F|E zcDyUnG+xllMd@2#oQusA2ij-{%9vZ)0E%z5yQX@QKp2?*(@=}N{ry&Sn` z3#^lSDGSaIc*WLC+JhXaWeH`!tdK#rm-h_zD+nG00`a|5eEZHNI*;CxkE=j7z zRtl5~gWaCkmgUR_n?E9$4V11%yPrX#iM}`QB467^Ad7FhJ+lxb&`O3_qs=Vxn13Rb z_G0${U`bbzh6rPKgDw8fes#gWSrwRy35sXCwK}&3;j-`!B55I_P4RXj9b$JT4HGs0hB{DHnFLEkN~E%4U>hwV@5YtGwlu^0Jmgl!ii;y0gw>9~Kn%z!YI z5SS3OP(+5sD@*y*BiOajHPJPb@0I@+^d|VG#LLw-xBpVWlD{VTxB4!0-kC?X=b{ns zwaECBW^DhXyxd6F)f}iCut$`Wn-3!ezM6>|W%3JLyfXXI~Ng zZKz9F!yjvS-E;0f&$P{qI~8`B{Y%UC5FhaC@ay2~i0c@e)9&QY(A(bgZOEOpZxm}y zGfN_dNP19yl*al2TB+oVX4m@gdfdKgoF6bh&WJnW@>)uNis0u(jA$)zfSZ= zNvmeNrk%lw!H&V9LAO`>E0dkzg7ukR!MZ#*cZcHS&7^$BgG0vPp?Ulk12hLhbOz(( zqq(4Qc*pEUW817^v>LuH9?%tC9KSi_fyZf$@#BK9qlHta1N)agJ{~z^Fm(rZ2R;|G zJ9D}Iv4KDI)O5~K^_sx6ey09K^@`p_O!ZXdVtM^hJADKFx=X!9n}tKpK~B2YKxCcnU9WpU5BYlosZn3>67U*wb?hvZ@{`hpP-OWR)^&s z&(rd`*uQkwfOhTnm5vjvm6Xwq8JhZtbuf+82g0*djj!5WiyIn(Tzo4jl@H;UbhpK> zn2{aT7v2V%f_x|G6qCvG)CDZ<i1r*QR|;fZnxNK>O0 zL}~vxa8r(Dn4^?XX92io$g1r~$hsWYAJ}3a)fM{3q%;ls6wB}rKEZDnitKPsLdks8 zjiJh;kPajBxCTze+;$nA?GfW$q88qx`a=C6s)07Mp&Dw>y@RV%oD(;EZdJKUNQW_w z*l$~nw3IEjzgi@%Q^#ir|7p=D@mg?ma`OsSf2BFhPGdH?4+35pr?nc7Ezt-!(5l+< zXP=L0bd#u<302zZt+#9c9Lwky{wu|M=hLGTt@Ie6smUW0^xN%Z@KgKBwI0GxkJ7sa 
zKjAAsC;qfgeBbBiPn{Sta|fZ9+(fIVTc*gC+>c!qQ_76jn3F3`6(}sGEY7biOt2pP zlboPALO=R!1j2AxdY0IW#rXrvn?L~|f3*NCBgR^2Eu<{)N^lKudjEr7HY3^@ zLN75D<2LtnyNLR>;!f?Yyig1duV^eSyD8Nt*N zE<->2bM+#ugV{pa!!&=%3|RB;0dE0s`N9ps4aN=5{bdPa2`tm!CcwtOBA~*bK7ihT zJYd{kK0w|-G9c0)A)x0A_Ln1wBd{ZI`~VmKh5(IihHV8Sh*}~o=>-!V$i<%C7i_K6 zf%1QgK&byJM1Ohw5+EpIgo3&Z0r~$G{?BiHg4-AxT_9BY1vD|&K0+;a_pV}Vz5aU$ z7+rj>yk38{Ccm#BJri?VTc5hJB&Kj!aLFQchaArYׯm`Qe`Bs%gyOM5vbC}WN zBRq9pmEZ;$j>@CV?A^n^!(7b3--$)cc&MTdqPY27`a_KXIOp+CWtt-tH`7`BLTEStO!u52?m^Igkj6?= z?3^LWZiV$-nexR@(T#(c+y#9BB&K6ph(+o^RNW*q$@-yFzd9o=$5;%#S3{ku%+5Hd z3E`S3OnIloH8KRaN-i6~5}xFcmjPEv{ish8bpYMn@oB?Sw;|)_nQ|aD}ew zS6dUYnA7e_<2$RhE^0U-5IAx<>C9cgkCn@gtwA_uPD8<-9dw1$5AkxM)OU+|Qtbzz z^u?Eqr$#y>ciI+}*`A2JgWP8{+5;~+hU(!9hRE}WiWYJDp`-d}k9SV8btd*&&aW-2 zM7PYgblwH^kw2}ntr;F-3mCirJ~?NMpU7-urzTRnzV`tS@kCCi#s#0!o!Qa{sh!pW z)u$Y6ns~HjF%KVE-9gnu@SVt-yydF;D3hl;;vSENF0JQO&7&L=WVzz zSnZfLuUxANy4P+J9vTz6C+>TrUS-j~jnU0}vYWE}WgFL@s+%`&Yi~b+tsm1&?U9#5 zh;6a%CM8~|hGQ)9`i5+Wo=1RqotMfO0!}T#&4^Haza!x_( zze*ajr}|j6A}oRxF7u@6^8FlwkUlpi+<%GL9CNv8oB;fV5R#-;HPt z(R)*+!HGBw3K56SI9|f#Fr4N^dbz(Y4yp?7`slQRi5ytNt@7tQC#y-JiA|Q zn~$@ruW6k2SqcWQ=AfQowi?x_@~-ylVVh)w0Dav@%RUzF0ZPySB}P_OMj;#tE?iMo zK_(nd=sASAcLMy4HGK1JR? 
z#xS>piU-l7LGQ?=&`pxn#1C=)X>Iq8pNs&lj+U~{z9G}u+1WT0WqKlulfHQT8fPS}FU2p=7#f9uB&*@=3Mc`c zp2ozXS#1IH9x*GtN(u0;JW*d&uCM3NzXnE8FUo&gC~%dIy}!@vhZHVm>O&S>m?t1( zu;S9y(JQsO>5IiW{^1m3Pir+xCh@EgukYj5@}wBUR)y{m3G+>+Q7Zj@J4~7Nc4jXx zK99;JiF0-zAr^l2A@2A(8b#2FQ4~c#IRd3{*4D57VFuUmvqBf5>*3>_E`6qUEzRm}RqX=clWi=#Wo4G~u zm3MC2p+yqsZM50(AJYhC8?CQLD^||>S!Mu4pby^XMUSUU2do1EQ&E;5?@$CT6Prbs z4I=ROH+D9|KD;Bc1tYey)@`rB);xKTIv_r$(-oL?{nME+>-W{ROQGbU-Yls)Yre{wKPk{5<~pjLaO{$+QsN`p$80#Y7}mw(oO{Tip$b3R^+kI2gjn+0 zfHz`$`hbS-z2La>8-A2Nm#nEOT~Q>W;W`IsBcIu9_RDM!qbna*F7}gbm;w>g_&9*_ zV_;P^aaHoL`DMQK-aWpqOUPa)rA+^RLn9ZHPvqlUsrURqui$=C9Eekht`szdmbASk zyHAvex}~33Kv_~vE5;r=&~aEvr*pUOl~}BQyS{o3br8IW+F%b4hZ2s*aj;Jd;?_VJ zFXsb>3X6A1XZwDLz9`A7+N%5J8UTxP_&x0}?Zj7U=|)#J(f_t7qOY*9SLqux`(9v_ zwB4>&Sx?eWL|HcudLT%SZoSgR>r4yAqtdey66tpL-=Y%l*qMmPF|i|mM9BZa8Qfqz zqNcnHqk5I*SP^DUe7|E`5zZg=uwliBK#RVX#ME7r^v+B_%E#-&Ug*p;vjZakut?d~ z#L{t%Cv>{wpZdg zPjlyBT>}q=lLkNONuew(M^X?Y*vs)jv!nLQZ&x2X+6$XJS%Te^%|Cw3Gjl0Im#+u2 z@(6Eoul^X;at_RP;Nfpm9)LN;IBDLxbeK7Z%T&5c_tj_B5&pr3wA_`EmL##@Ytc>O zO4nA~-i=9UkBefA_wg)BH!mrKa~^`&6XWNvxZ923r;`QC1E*b@%2ynE zIv%v-@8M(R5@z4?>*<_g(3S8ygQmyxadS~6OT1$$dC~AZe$}fnr~#*qzhS@Qv5<*@ z649;ke+@OD+efSG41(`a!kh7mm6$=O8L)R{%;GMqM)_uN9^?2L%xHl#Nsr{qE|>lE zeP}#J<0v8<@2JYxoiXsu=B%Z|d@&-5e2q5~IoqtgMt13<6WI^fH2vu#Km~KX2KMIe zFXVj8!y|6v_PYrSaR5+rVnbyfXc;(ZmCZ}|kHu3Zk||OJ%TbkPelr_oFs`uY!Wb)R z&!uo1p`VEl-)sAhML|ETY6FHOlMk708DY=I&gs^kpXKjOin9j?2d&~(0Q#ChSCUEo zwL#VXJG>MhQ_

C2uxHIc>W{wT=$d7qk(=;)XAHwkqhjz@fKPf1Tc1alx7JP4>5g z)UE4_^eOSlBKBXsmhqC$`J3j8>1*8fnfBh>v4)2H4;OzsLSrTvJWkRS^EKxO@Qmq& zUiZ2}bGxgi&nSfnBS@evIXZ{+(DhjzM*0-}o;F^Iygy%c^>>2kB}QOPHOLUM$u_?z zO&n&9u$0850%{}Uv?Ti&sLt?Syhc&OM6(b__r1J=j!Gz5puob@KC0#LUzoTFzhH?w zMPg8)TytytV-VWGhN))+eJ&tXAKjopAcDW(($4aB9@f^MzBv*ayNI}aB9+yZ;6Wd6VM|(RrZvxB;6Oom_JpbP z^#+X7su~rTME?1>=5a?ZH;sZS#k_yPTgCdZT1q9`@9S<|g`YDtu1Au-Q^!+XP#6s0 z1#iuD)nu}9aILD*>Ie4kWW1&w0^lqcksMB0$jVg^>StdT_(Yoi1DQxZOSd6YBy@%z zkUK~ZEUlGaZHC@$#(qy`#>b|KLelu~j;QU^TZmQDCy)BW@CAd1jK}O~5tzd9_fzns zjVw^~%VTgna*iT;W~arS77p!ttOn+X(3x3wE!fn(FnE`9h>+d$5rP5nRW62vmQg&7 z`=Xd$p_>P>yiaNCWG!J4*~II^r&<}J?u1YfC>qtms`xv4oRMzStx7DkJuNrcWn$cyl6;js1|sr9U!`wh=jH#lPlU zHz^+DDL4(mEh4alSV01&AE?P{xQ*GLj|!FWxf6=e z72`9f5uyEya|;1PD3U68vsaGKlNo$VLEM*5@TMItlpFgbwCEd7tKjC7{!qcv-VS_9 z?_3@x(l-Hl2tZld;MIJP>toR8?ze`u7KNgrk5`-rll^ygnH`{En~;=vhZ=T;h)mQM z$_EauN$h(QxcUDwvG1I56HJ}~g$5$^sYi(WVc4C%%ovyGVi*ePI+MZifZrd!sqbwl zY@&bZP{fd`zR8ugVXjWgwe}YgNj$5z1e7@^ALn8q|GawfX)>OHQ`J8st7D?ctk9}H%-bSZX#i8uOL@EV)Za|499SuW~M>f#SG%xlN6ZnizRlo8KQ_-|05D-^0mepj<1tVH6z2z)BOx43i1wV4qNyM9SUD+_FuaTs{ zdIu}^$l{V#HK(;4_NxZpUAhm^#oFeXv~jg4i8%OTQuahBbGLdP_bo`hG0*N|(@cdBfZl32TMYWtRqf;|lQ4K&v$%#xMS@ z429tc4l{4=Xxasj4jSov$5p#ervUJxMx2_KqPjF4aD7fBm)vHXQ}yX=6@CUsg%`UT zuDs;22TB!*5#QV4nW~${J_IsrF7_yrdjb+FQzj&ZHH(z&rI5R%OT22wB~j3g<&>_qyAbiEJQqkPf?yI@J@A7?K>Z4MFNsSVDAG3 zM0#&ojmy$ETD_5a2|8ggD@A2c&?Q*Mh7BCV2+##sNP^mQ3M_oAyEh@xNg?ZljzCQ7 zG_HHv1Kx|&sgtus$)uU7C1*)CpgV45nLKM!lsk*Bol*E57NsR=I}~Fl751ct9gMQ0 z7Q!Z5>rgg=>OLs9KHLAkk6-fQQ=D7oPsu7WGzv52A*T); z$^TQN9z42NiKgf0S)e2AX#=)XzN%U~m-ZJ)9%d{I->Lf%vFhuk= z87&kg>0t7>&1fx~V(x~lp1c;OoH-b&g?S71I!&`Q+NR~pv*inpt`7<I0F3^HVM%C(pbOWK8kG@PGNCl8q-GxZyb2d8TDUtT+?I z+%RcU+e;$T^zg_{kW5Uqx?XlOYoW&`PPL)^EaY`ZvMY(|-H+cyZ*{F*yHHdzla~U) zO&eCM=8}H)-7={-0=Kp&nG9D0^tzt8-q=+P3Y7!&YH+n|4bn9pUx`{yJ4u$)QNTfu z6{*m#2WIJz3tn(da4f_!tqbU1t3FC`wxqgj*ph`5FV$RXVU_gTbpvhT2l&DSj})n1 zxZ_2vH2V1^8xSV_nxqUWLq4=;@Ppdq5w2#gSgi6z&if611Jf4Hy&W79NbtjJ@a>1! 
zK1!G`pwd9&-A6(icP?U^^&&@DPjY$0xj0l$?E9%}AN0!^WI^+yN5bftqt^M>iV6Dp zvxpsc&)I*&tHj*ws*OSCo}W%*JS{`&F{<+Ye9`vok$)J!dFWH?khE7v|Tix_HKH*W#(&AU_|w z{&0@O{ds%ITCv6ag1Jc+h#`*lz-b=qtMb3va5 zeSh6=NT=JaHNhQ9`}?UuyfYc z`e%hajYgNRmEqjrsLET@QPFq@c*6;9iJBf9MT%%IvIH8L2n?Mw3>UGW^dmxO#>!7j z4aTVhe(7OJ-r?NIZz(dtd_H|@T129yC4fo}C|Fm!k5Ns8GjP8pdO5m*&Ta9f)g(rb z#!@^ask&PzX`zoh0G9LH(%O9^H`hg1hGv?6xW$|~1_hZTdCtEJ#tdmpb_OwM%+aWr zu{#B15}DsQ`y;?Q*p1A~=7d>x@G6EYi{w!N)k@xN*Oy7#3DUo3zi0#$SWGiqpugr2 zPK}UJBOA0v?^!l!C=z!&S{$5y69a=qVm2?!eX-Vm5h;!O>wzZC7lsz~_cyLV^DRrX z%l)1FJlpbi6Gj^cL; zOKrtxzZw8e3u0ZUrj)u*8x60Xk}D?Wme#K^%yoZx{y06ouBidZ3S$NP1hvYIAx7=pQAN8Z?AK;e zJ1X1NeL51Nev)IIQmW0=U~FF6s;FEh=kr$8A5@5n%kJmLxu6X%k8P_8>{4u-Aqu2WB zvOzzm6fbEkV-x)X)Lh87$(dk!zX=>iKU|%IO{Xrw;3IGXj=W`>{n>!5#R;ng{LdCg z#~s2`aIq5>r%Z)ZgE3#WlxVYP1JA2e7BW&!jK;RhyG^t5edNN{AZl>q0-11`0$di? zx{64dz^5XXcvY=&;_;nsXxh|sA?BS+vU_3DaJpkXXs%ly72=hL@7GmKd%BzN)xJHh z6>^ID$ICZu-_s|_Ck23te4736|0ssclaY!DqqG#ZCggQycKPb`SgEc{OQ(kXDDmvwRz#5-3}(ltlla_i*q=^ z(qfUebT{l1(ncJ07Jb}}X_A2ff)eac54OHH1UZ3TNZihQ9qtOBqm3-Z0n zUC;UTbKh>8CjpczJLzk;)?fKloxUziEoIE5Xb{LXhNh#T)$f0*n$U>YTrIyiLKU~>NQI5|w_O-BFy*oUHidEkb*x3kq z=2H4?2~2X!*FCnONpcPd=pu@B%g>GRB-a=oWTh-({{rGo$T75jDwAnsV8HS(<9&`` z+q^~eTkk;VkcUy-TdA0zLx)`3jIRiW1>C2IsG1#imY0l{mrwa^7GYeMP zK9gkcH&;7D5kWqZ>38Jq$}#^+k8>Is7Dpz}J3(NVL}V48hyyQ66Ek4!*GT%so{LN8I$s7fEM0kP`*@hPG^sLJlQRk zwZc@&aZ{1hEmX9@Bnwpul6GP=!kuf*2W~SJFxWQiU$(TBd0OIx-?WC4@PgkaN^~SZIA}WWUdZtw#x%^7_{Gd&F#LMQB^$RWiz#WK zh^K5=3CUUIMNePG7Bi(8@Z7S-IrUD*Hw#Nb-&5MDnkakEEFSY&FGGNUP7x;D+1^ll*a-XRfCXlg@q0z?3E@ zPX|?hDKzXx(`e#UB5P@Du8|NF+;t9L+~V_J;u7<)7;O1n6m%f3WoYcTHzRX7;VF&R zUT!w=*Xw?cpDuHM)H$>-0SjX}UHZr|Y8AZpe%->WJp~q~Tgr{X9rY{G_Z;vNihxoL z{W<2@;(FstW0z3wvEjm6=yem(4ULGfG{;xrLA(_RFIltHizQaCVX?kO9tF2YHqeSsV$30)o^TmSvvZ%9(?4uK9Xeb!J(3 zC>qSOa{`0Z)t6ocm+23f8o8J!PX0#gFaCx&l2echC0E zQ1wHLPJh^?bA-`i9hwDXbEr8+VrQ`RG86B7U~Vr=F4XsAegs()TM^@qKQGf;N}?gw*egw5d8Ut`a6&#!q2KQ%U(+x7)o)MPVo~~>@n$Fe_W@4FRu6A44Zkt 
zIO42NFXwHMQlWmugf&>dF}dyM_FexO3Q}kz%^&Ay#>y_I-({~e&Pd4p6464orx~^` z(x#wujwu zJi#DRZzMCkjd^VCmcw1)q#Bmb%=|aTL5ZJ@vox(ity|bP;{wPgM7mxj=`K=&I7lt; zcMrTN4-6G&UFL3y5exW8K~Z1}YUN!6iq48#3{H+qI7lQk%`u`-lYf5+fs%2>0c0KH zGf_)hn7gkgU)A?qe`Ej|X*p?|0)G!@c6QCT;~q0+qf9oJze^ndP+E*-%3QkNGlB&p z(MN*F!bpg`Vxt+`&Xw#NvH}zDGt0RCraZtQ94+EMEG+Q1I4H*v>1U9r*z~dUg#0n> z!*$v(jdJ1zhRoENf`3%$j&TMN(7j^8E=db?E*eNVomCN_D0D}Fo{|vr{i$w8v}u(A z2|uP%$8AsI7HI|m|CF-n5!|h_R>$L<#1p(}^fB`3^O!L;Z-jpQ-O$ag0$SU?=d}Kw zl+G@(Mn^)6;M(*|9v7vZcwgDUywc&3pZDXU%7&}%mwXhYT=#os6JOQXG0UqMbYM<2 zGV!uhH*pvVN-806nq}+eGxy$Pw`WzbqK))b8&j)18TyxOCSHCeqs);|<*N8V7{Lv> zE%y6gWYw#w0M3a%Wbc@x9SExx0-RxjQutpoqj@@>^wUo z6Z#4AWzF_7Y1sPf5qP~>c16VIP_a8TxBq&*|=mv+;QQ%6WO(-S3aOetDBPsw#8Ky188Wd%Gb?qT2G=>ZdagPjr_R|@`gty)Zee>tuC_;P(LUFdT+w9`w&!&Su zXtV9JV$EZ#r|1Li1q9Q<KGFj@JaQT$S% zWSQ~1|6=zKuMkmww9?+}MHr1;;4vYrt^Eo9P zQ!f(HO%~}bb@*pOL9wCnuI^~`;nUkPoEN%qJ!^6}(GtQk33)^E3XMp`9hxs`jwo-N zvhOo2d|8OGJewM$wuuj^aib)(e9_#lwZZJ_50#G~;=s5y6Kkz6 zNgiw9%hzQT+Vh$<@2_|Da!I$S|2w?#}5($2EpGIdn zaorbu4Zpt!(#8Cf;bzw;@@ObPzU56t`MX5MbAnz;8l}y@b42XT!I$#?0aQS%zp6@B zshTCv73@B_AQ6`);;KZf5cP0RC)XnSYMnfC6i(F-Mf#m1*pT3~ADL2$>*UZWCl?wz z>v`Nr;NC5n$pe^q0K-3_zG(J$Q@IE=NN)c8fNPJ7KGYpS~_l9Yk5^35LI^pjkK4K+g zQ#6)MA*}65YCqv2tXC@cQ^;;bq

  • hxZELCzfWN{2->`i}Ojv7ASUDF0yt)qmz={ z3X$&FWaH&FfkbehKvN(UL3UqC8-KB`qdf_KE7YNHV`W;fWMg}6B8UFUxFNggA64x| zk>_T0$^^@MQOZOvzZdnPM^`-&ft98aY>D!d5JIiQc%`;;FRtypd@X00dc+)_QGa%_Y*jSH5bT~(O zJb7Q`qXhR#l5ACSO!9j^R)5tCvOtR(OI=?>?f}Gf4tr2!ZFS2Ua@SVm8#b0~UY;HK z{9xNveT{tm`0zkv@>7HUMM)3mE|K&PUs*)o!8K#9R8LJ)U#*B#zzEU+ko$NQaUFJ^ zp2K|i|%s|FCGNvJ8ELlsF35xV7Znsb2h)?kgeTrY`Q-3(;10&{3=}4Ug zs_HD@mRP{^w7|O=7GS^Shm;O#V^5ig(R~PhPWT-!(@=Phq>iaZl})ZGg;(Xo2x3|J zKY8nmoRv&Nu=Pb9msF!4d+UpSwf(^NuG#y|PqyR^eD?r4wm#;gLdU~{N4Dj%n~#hR9od@8ZaSh!GW9K--Ma(Zntjb=%%q@r z@znxFd`${`O&}F!rOX`xp8&!TE+h%jf;Kr6Gg$+J))7L1Fqt}Cu4Bhm_dBUWc)b9?wwxJ7%~b6!I*;(z5Bnxwbce1cZ1)mR!v zS}%V8!=O6a(ib<;8l6sSgdOeig?Z|ih$e$1kMYE%hV#Q?!#588F#F8CwqNSbql;&6isaYJC}2Ib@iT)BS;|8y+!iFJ@F;b$a}?L`N`I^FZDFObMc60MtwO6{>-sHfx(_<0_~E?HHcnMzgJ zq(Zc{;T1{Hy)6*}F9ZbID36%v{EdXiRN2m!kqA3giGL#gBJWGLkuMW7#70E(9%3K)+u2A$Jaju`a@tK!)1IE09?@hHduEu? zTZz#_MSuVNb^UD{clQs!J+X3P%f!A3x?!SWVlwkwZ1>RQ+5XYnP4mv;ohY=%nNTG2 z)rlM)!jc>=!8mi;EXO0}oSf5n6*iFNEsgA<;&uLcNq*48FyhPvKM|NfoWal9oq0|| zPwSsNE1~B(P4kkosD($C)28`LGCD~?*7enC)PKw7ZKzB6W&KG0RwekZkS0>?p-`Of zuSwYkrVGA9CNor?-r#KLP1G!FbcMYd8a5?E%|q2p{%DBQulz)}GikQj8#f+ZJ#lz- z?Hf?XYBs&uQnW=q7FMlMGaCe(px2wU>ge!+6_l|$uhZ0@C=7W5zRRGa{iCtfzIAy_ zt$%hDW4(LFmn4&PQ%W)k@7Vo`+QWR0XeaK(`ZGZY;d(_U)GNB8UeOixitVXa^lCk* zQO<6tpAQ$khIxCj31Gy^FnS&oXGY2CJbxM|)6vlL5^8HN*bVcNy~s9U5U>geT>SI- zB^8y`JpU-JQF}CeAkkp&pDuWBl6iJx*?-k~6B2cZ+N}fjh{vK)X;sVyZ-_T8rS*&D zBK{XIrP0Cy(1`w%Dn-S}M+t&psVMbjc$FPre;LuPhqxNYkf|OX{KKTrmo$Mqa&&XD zr^qMgJDQ6YNPI;{wQ}O?c_d|IDh->15H{l4IRk3ioSc_B3eCl+1?ReOdz|ZXJ%5ix z1oT(B`n`wX-%D_oiAF!_vy~IqrBoFuu1q)jl-qUT3l&>qU8IQF8x*ccK7}sKzcT-Z zl++^%U5xM>*U1&@fGU1Nufc=<5Y5Te#EN21v7;kUY%Eg6b;jg;bI}Zth>mWof)dh6 z>C;oW6u8`{Q<QA0i+_V zNJRi>gS=6vP`!#7f<-VG3|C?YNtO%cOAD7rwt5vt_N&M)WiZG?5Sd7_o3yi#gCk9v$hht9bB`bbLXAg zs8HFFzW5(wo0mn`t)Z@~&K86g-lq>CD%Oz(IWG}jI9ISkDDKz5XVg#nbB8 zj+Vs$9Usk=x0M#&Eq}Dat4=7&6UaDE)~Lu(4Z6@3A|oL(2&efV83~dBd^JEu0%Y7o zekw=?=N4WlXa#FA7(gC2`gXRUMP@k&dkXLk_!WflIn*&&GZ@sn2K6K5vJ6anA~}U4 
zxupC*3GtlFohkT{4fjbRNb)NDAU%3eWemA~BZbjKcIAC&~5h#Ed3QmtK--U--cQpMq|{`lbl&g zBivKyLwnK1anu`7mqKauz?TH#%P`_giui`imnIZ+2(b4AxNZPMcQlX=M2RP$w!=Z% z73|u|mu#dK=zmlTIUeZKNsw(}vPDno1Mr%30CJJ~rpClzSTA@71tqfq$P~<^yj4O> zO_5VTHkooscR^p>d`W?irCi{1+47#M;+|z1G4A(;t$OC)e#PjmA&)mIkXq7F`kRIn z;{k8jqGO(afzb(mw>N60w59WPMv+s|Pzf_r`XZE-{eM5wz6HFE>dto_nvpakX=XIi zXf%4iWyzLgTaqomB+Ktd{7T{^j-A+fI4>o(ad;I-LxJ$VkdOd8YI+Q4~Nx^I(p0$-$V5&29ds^xhD-sDqJ8YNK`O*qP$Vjhy#bh zg%?Jmj`zce;P`S_7cawf!Jx!yZz8K}BqxWFw|}ZxJpYP{_1f)jgPK55{0}7Oc3Qn0 znSYVx2#rC9rWAS#A2*6R0#`BG#R&ERPZN+MLK|y4_Bxg!EJzhVxev(lOlfR@Tm2v& z4-#KMw`6RL@08PNF&=kD&{!lB!6FgLaw<4i_Xu?upD+3<<-~SQj-4$x_xqf_Q#sH% zVt+iILn3VC-y*mM2mOPVQ@P+ARrg3vzDChyS&YnC$?tGqGb(3ARVfe3!7>)ROY%)L zRQ4qu!MPM3XoqVI>tzySn=Odc&=LTB!tBDz4RQDbe-lgM;&ZUVoD2 zI7+A2(FQ%GaG55jCb!x=YEA?c&<&pIOC$$qJyFQyIHNWvUPdI*rd!Bl?9?BWE5j7~LW zx27a#>-Kc@NKna027|(!%M-c(Uu0Sm~=;y3Bk$-c}iRx21 z@$hm&7Nqj>ET(0juwYz81S=k4?2yFgkLxS4)&6vii=tFIceuLFdFoW4_dt6mfJ%2O zI@&#PFOMk@$sA}72{fbOC97GdQ4=Rlb;dllc|~D@X3sE z$cP!ck24V^o^--;7tYM_rV~LyRo=3XP`wcuxM7pUK~n%zqru!S4Y>?L@VB z*%yhj2rl~F!B>UZHzvX*`{SSU>L#yn>Vp5uE1kL|S$v1>)~fsTI#V(=*f8J>6=r3|O5WNie%xHq~0A#`84&!-hIbg+<#Cw}vg+NFsW-N9=14H+UGu z|1k@ESd4VnNLmeLVt@4_Mq);Py*t>V&El~sH^vMSAP&}VGWaoBM}*kH4N7X z*e}2WR=`z^)^~SBtMaEx?{O;uBi_NU{NIIw-j?+#u!5(sw@{4ulk6MJ6sT7~e*zv3 z0gp%_4pcg4W@qK+`sqqO3Ut{?=X2(a{1JE@=^aQH{1x|JSLU?)gkU6N>5h#OLulQx zh|3*`xmNBO6Msn+iDBT^g^=A|5pviIV#-KKLCdr-cslF#m7;YO}{7yq$# ztX95oWl)MUjsF_(vrgoYJkv=J+|ee|#6do8MLGdTGf=wIt#i)Pskx?PMa^u*{PL`? 
z_@bgePl11M>P@X+-dpEV&eOTo9oBf8K8Fg{zK26u@P9N3B}WtfYw%z^e870Z*%l-j zi8oppWlh4JT_zK=$78D5R^78vwA4nUV#7eS;o79K_B|$Qz)!X|QWv%Q&9t|9q#n>t z2jKosg3;mu|1ahP8_;`UJ`<34YRdd5Gv|4nIJ_nwm@z*J#5du2JVzY%|MdgChkASuGQQ?Ws5xkW$AOLb<+j<8szCq2uC|%s%8I^&J;APo-5T$z z40XnB+lq6GzAGcNPB~K5Eg=jPQ5uPQEimV1jP_aM{E9#3cdr$~O0@y%wcN_*U%(4f zn4m&JKKd>I8ia~wj2r_vm{2|&V#x2|buK{(B!Ax1NP-rFw*2>24||s*Xi_Ou zq|OAMB8$C_UII^%M(!!Bg%rBsMQ5e6Qe$2~H)d>zCUpE~)&EwFRVPoFQ-p7hI{qs6 z4;;n`Cy2x4nK=`yQ*$!D>d^6AA!Xf{Jdu<8a@29z0RkwDBZL#h2`hwS$q70WSNC@$ z0e`?Oy;ZD2U|>WpdDB}G&c=vCL*j%|LD_;yUsX$}r6*nCObyrAYXcHZfKLcg@J1aq z;c&V)9l~!6XRAb-VKgSA7HA=?o^c1PE|V#kNd%h0f?A`cz$cuB(6ZVJ$?g#apPaAn z0ps;N@g!1%jLNf)cpL#Z>nzXEj+ww?W`FuIekS}?Wr0dB%88(_{qFVmWxQ#0%KAQR@isz5C=}q=ZKBmC=6_pV!zgIIOw+Gpuz#Lr zq?i}3_7ni84M-*(=TYMov!+?+3(IgbEej^F5)0X*#ReMZ3uQnvU6M=WwM-(yDvJnq z#O%?Vv_x!oZR2pYKmr+MFtcP`%GF%~`7IAP(K4{Ci9p_g_CwAKQ$_QC+tm}TLUX0} z;F%oQ+W=7+kU%lNNAGgLO>cRZ%6|p8@mPjdI$bWUG^?FQW(%4!YK}@GM3Bq*JlrACoudTOvPFV z11#XhX!AOiMhSzHROr8JIE@O-8GM@UJdZx9RFOD%jvzn{8|6xB^9A1MMt{#CBH{&_ zjyfWO7h<9{aLZqTj?&ZudI4Ywr-!bX_0WWEmMcn?Vye&UWe>QF@#IXO^7dth08}WY z8y0JX#OwHgzxc?eV!>FY5*s2=)+vd0UWMIs8?M${1h&pkz=j8ih(I2C?)9;mw( zqvT*P@(JuWY7J0|04cup7Jpcxa(EW1$*Z*BB&j5R_#*|>6_i@_pWsXaY&;D{z*W$P zpxi!xR;8j$3+TNsx&)_MFf5?=XEc;xoi(bNS@m`J4TWc3ld@IdP*hnfaaqkTrZ(5r z;VV{-vK*!XJRCCF7*w(DLnW%SOQMxiD^6m^Fp{&0qMbnrjM35xm4B8xjR}lFqrj9J zI)6QeKB%MsInX={^&AAhAV*Nqhz1Fka@gn2k!oDijii5iu>eBOsqqYWFQ%oRUR=?< z3js^$a4A43|GeU2F&-&@8eUuBMEi)lR$u!@`P#eITzl6wuH9Fest<)z_2K-Bgs(1K zQCAPH{ThOy)Y9+K4}Xbm06A44KDkm2;j^q`JHaS_^bRaRLijTBdkEn^dZ%0vD1<+!%-os)nbLi5j3mO>q{|5_)8kSh9JQr`BTaj zw$QXC>~d9^Y1&)`NA}XvCG>g4cKHtL3+qI{b|Fs0h^5ZcY<~#c?>+?FEc;psL7ayt zGZvVED8ch(Hyyy^*ShIbN+3NGp8*e(zF<`*@9+@G6w-&hw?u~=aHWQ9nL0mDB5I2*!N((>{&Vk{ubHy zq>Dwfva(oSqJNHGzLj|($+>$8EpO&I6NM`7qD8M{_L}I29Pvn%`5j1jFh>sMk6N4% zvpT_dqWS-aQ^b$JIGJQl&k{&f_GN%S#ApfGW(4rKLXL=%zNA3p`YVvS74)SX%q&~hjacBBsQz19I&t20;_qU+ z0L@>lk$-94l9As-n!i}Hisnn`bIlpSh}}W5CcR!{NE2mnnM9XCjppxObuQ+|kCliX 
z{a>ZO^3|))u`)kNV{Z|ZTq$c5$rbViyOXKd$iQgTK&l-P^&~Pf+%-(<2O@QNOKnhN z8%ENv)zs9OQWtBW&hybn7ugR!GQTIjcM+DIE`L;m;0k>*Wu3%gF9~+LO0IobF%&?V zE2m}KsXu^UrCtQjCzEoy2n@kV74aaTW-tgk<$a_IE*=$$J_0jU0JUlaPH)sIuG8LGwk@1dJL9jMG(l$CV3s2 z=mktVi*Oox0nMJZZUxLdonE{sSHpk|N0YI_PtN7!k05|sW<7;zQ<7xC2vg@PYKs_2 z8J#AcQ?j;HPeCas%HK;+ zjCwJp6?7_vn$e=huCZEPtnRCA*?-j=qe&Vd7DB}}Zn`eJ@y@MLseOL)Z?I|=LlND2 zi(akd?4r?W=GDJ$njGqI1u_xI8E}%URnQ5nmi4;DK>tiy+GgRNk$dcPl^nZzd0q_rEdOAKb(~;fT?yg*abN`n#j2b8^3_Y9O z*AtKuLy5*6y)_i62KZ2+GJiI1JeVH4XRONEvb7<7Xnpj^@cnz61c!sx8tnpaW=NOc z>24XVO>R!Rm8@jsfg<5aZ%J14Bpsd#55ZalM&NV?pT`v0aD8X)jZ_3rAjl)=FY@2H5P><4A>A*59(6X@cOr5(F4)ND;F1N zy_N*BK#l%^Fu1B5o@%@9ZAQrN#qw3?`(19|2SDr-1W+wL00d?-IILC{Z&gDvs$$fi zJ>bE8pXCejdWc+&rGKR$J$``Hq5u2EU`?<_V_Cqu&LA4+0*ah%XdsesH{;4NVTGuc zE3>RrStF*zAgKbR*IRxx)lX9j_>cd?YTXv>EVQd zB4Fs6FT1-pePFQ4-FMTdBO37XR?%U@>}nlN82EgXtJ^#C?SFl>FCKVmrk*j1hJcGT zvue?5a<=d5Nl&!caY7;aFxKf*@mAg&%%4)=$*FrF-#@Z+3I7S<1iF4FvPb5g70nA+ z`#FsyX=)d+jtht;vQ%GBc){bI;kRTLP-95~tPD!Y3!$zD3ko!b8QI9W9KVHt?N>y? 
z>WbcqVsiVc%71*yB2p@jkg^n({FB6?@6L_fI+6Bse01>Urw;}CTcex`B{gcwpBkv$ zc5F0^OWA?VvHd5u23`>B$FjcuuC(OJY|pe#w%E~cZT#jvUSd@`R=4ix_r@8!$fS1PKiRxJ>(QDV`lH|9Uw>6Sv>Vb+Vu`|!0NQaN?FHIl zvAt(W(!?!by=M^1-v4%EroMk2gu-qN4lm}+TZp#BvWZQt5%+ZweA%c|DnaQlrtzARNmzukl%1Buf82cC z)BATmG1pwN?qFxrM8;LU`>WGC9-4@_TDLWJ&42aTciU8^bz76w+^{v< z{~-GI`v3mK1Jg|u%kwr1v~jaM+jr#Yaeu}pXd9*<7#{t`jh$oP{`m0G=X256;O=cF35pL&k)r0wy$F6%(3Dn9%eWVM6@P>fCcjZhvN1MeN{nM~*(T zyW$10dEapF-d3B~Bp>aVK3Bwq2j760@NbW7erzV)u>X+K2}K2vYk$Lq z6g8!SJWfhQ`@M8HOw_=`sKb z8iruY7jvK~nOFrKi%a?{xY2(_ZITt(_zePZs>Os3=qWhGXi%|tteTzL*}Su_MoZ9Y zf)dhW^XV;jZK*W3A0E4e#R2MB3mMZ>L!I7rW6q!pRuGn4g4bgX_T{p5yMOl=;UbFc z0ci0A7=z6vv{;SxX0!wA{R0{Qz<@u4>wrw(571&G*T@M;87=m#uUv9EiJo!c3!_E3 zcTci_7S}r~mvZt2Rtx%Tp~d8X5-gr*IsEK_*163M4A2O5T6JRmOnY`$yF0x8=5@D# z@luksPCX|>#rSZdVXD7|f`4&EIHA-vY?yBwyJstaiers!GecE(Zhm-AozZS*bOxKz zYjOA;?v{MY$ad8@`w;v;=~&_AkbX_*?UQ!sGRSs9o!JJ@){I}426Z-lZoM<;0?2@R(`@uOF(Wd(B-esJ)8_0u4 zWt{k>)i@DqAl)wl4TKkyFQbh}CDH&OAkZHw$JQtyY(ZHo!;9xYCmAT7$+;VVaJZ0@ z`(3Gbl>e|=cHai>%4BtB1yxA{<#%8Fc z8T5IZrzbY^^i1l^-X~`o^kz}39)M&2llQSZ{L854u8o9soy*Qe3xG}@L%<7w2JM$-#eTYtu&v061>N}&4_XkP+t zNTA^a3b<9<`B?;oraG7{SJ<@UpD%#RATbT9Sy=jGh5~Oi4NI{Y;V%Ht_A3Brc;dZq zcw*w`a_*aqMdiQXjwWFsl4A`^InW($>QsQF?Oy?_|UFtQW%4+rPiBDmJ_u4BAexCZCtn5s;E@d%cxZRTvWPjPQ;&=L1~0wOkTNi5~VZo(wTVajJ$0cx3Y~|58Uj zsfQ7_Mt>W+u3qj~JPAA2zl60R$kKafA=$2w@vCPI#Uooh7V?*0m8)fCVRS16uaS*` zi)KqVoh{vTws_N7xOg}U#&}YRW}*r}^o~^!y%@9oULi=hEHh6&uw|L|a6KU5;two? 
z+f10NWFX)1mje0af;?9s`InmJo<6YauMRZ?`+p8~G>ro!zf(r?m9V%@*GymFA8qyP z6S*0H<(qcrLhg<|?dk2!j=SzWdI#FS;f}FLXym4KMJzu&o@~EyGr;nP(zV+*^f+ZK zzdh6*Geb1rlybz6EPgxE*V^oIw8&`w0N_m{fO|hJqxtrNrEN8uZ#(CM>>Kccj5?G7 zcz-|{Du?uC%zFjaL*5;dyJ&LG48#DS(`sa2#<{Nw?#ts2i(7e=L*AzivEf^u0q8y( zHK+hSR8gVyaP`#vn=7%z$;sR!TLLxvzjNL2ZR42$_q;nhnQk3#w3>m6>+fxAM&I7> zHwAoeFt`k`5Iw7BdcXYFId$JSY`&e{vSG;LQn~xUcUE}tE zwfW&%gkL8k{5l!o>k8F%byuyft1DI4)%``)b@2~V*F8IP@Tpv)VfNWsc>I2_Yez%- z?lyOzYeyqIV&d)ZJlx-U=UWfl{?0@FnLFP;dH9=?!G`^hj)P-G{CurIMy8>;H-B+jF0x-!%(}h0xi_9s=L$Wbu{Uw1SZ%+im{WVD zvA6E?jlBZ6NtIT$cn@?P5R_i~=cw%m5XmmyZKNyTD!TU z3Z|990cGpJfDYU^Twzc2SAR84X1!>r`-YJ!)+8|+-ePc?0SuF(*hN z<)0I*P1IXOlMWlEb(FmB7H?FtW^8EjXNBED{}mSj63Q0#OMnqxPcLKkh5}}UcVKhQmS5)NE`+#>j#l{nRgk*>O7kvUlUy2qkie_+|AOKu8XlYlG=Fhu>9_b^z>R8vT+9{h zyp3?Pgrc3-kl_(c@>h`vPdFSI?`3{E^ZO#p2am~JY3emalE2CooAchwIewbR{C-&q z6#IN(W3PV|u-;p<^P%ycx%CYmmJ4_8duY48wIR%^Fp^SHuIg-PV5ZB18SC151Dn4* z5_ooMkFB{eZhy4b4aegHH6ps9|IY1ouJqXKvEHu#_LbT7F(u8gW)TKhQ5vdY>PWjr z;Hc=x;i2(8YEIDYyldF)ZXAGg6kYnGycSllHe|V|E2N0{!d7%N(a?v4=GrO@H7&(Oboeu^Pa((dM=-2_s6k?(Yry zIwqSUL){sNIhEozWL}*lQ{)%~f0TtiQfGCfVxZ zajUaFXtGwyxfmm0B>TZg#*xlKuJ~yO&`SAp2!G2WX?eSt3ZqeK_#~43mDj6`OiP!Q zYq>_5`BwmKBMqPAyxCvnL&Y_b;b3U|g*Y`|-> z(1YOte8vM?WT+fyLvo)iBdoG8fo1D&?KQ2xm4uom7hfW16`@em#Q*)6Ginr&hkv0e z0_gX8(XJ<7QG!of;chod2JU*pDzV>xMU65J6DKe<@f5DWVPL=Vvw0xwmX?s+;HlzZ zE=c?z)Nt>X9RN>70G=Z8ch!)d8vy^xUFpw7OIEt?$dW&DSrPtXl0O0@fB3&3`3rWs z9M9aY=3OSy!7KA=ngtk{P?PAbq<`LC>2O8uy4$tJ`~sH$F1i&Bxm@p-iif{TFjmoE z69o;ngXRjNaB-g#>sx#u+JJ$~uPT@MjX>sy;vsp@h5CZbZ;)kvq~&9`n~3ZZKlyVq zKhjW;`RBuZK(&BwfW@Xf>8*2j z^t9b~$8{TPRT_q-w7iCw7?Re~sU1hV&7tVV!$XmtxHVwjacsovYkyh?rTjLqN?KQY zb2CM6ZcEXdn>Z_7Rmw{RDKCAllrQt&4 z^nXKZltn>L@fLkS)Z@r7n7?&k{ya!E(pr-AA#@4RBVM@wfmQ3U>cLA@1NRj~kR}Yz zaH^W7x@gEt;(_-~(|_p?@INEzORY_T_j9IcJpDlqUnA+ss|tGZ^L@JO-gsrp1Cuq5 zmWh_+P{I=4bZmU==tyAy!Io`l$D3o@Cbk+QeaX>Lr=@XNA}Xu9x+Mh6dUqRCnHd>TmHFbui$C6b z@!qbTzpFB zExC-{n$ETPM}L=$DRWh)!taw+?N1(MhTw<}#O++9=U@DOP4Uf!ZStF%dS-v)r51(L 
z{XPGJO}NE3%MIH!@|(2@{F57RRr8IvokFaC*Ik=p!);+fN2*!4>~0>-F1dUt08E9h zk@)&U3w_D8w{IM|Y@*rEJ>Y=!DlW>Jjp@oM5oIOg0{JiJ^7l z4gCnjrY5c!7(R^0!JeD4o8inIYF`vh4(FYLnol3?L$Ht-sq&7QOhY#Dkm(ZyW1j-d z34S~X569_AW&WOiqw3$nqq`56`Elg=!JH`5lOL(yF`w_&*LCoz>#sYtzu0l`$wSw} zt{(Bl#;zD$e)Xu2Jbd?iN7hgN;QjC1`~I=@lMlZCZT+3k9d29rg{Sr^ z$C_UsZ6$m}zB*2Qq*&5fMOM!`tBJdj)%G-=wI9*5C8v_OyUt4cf8x%k*Qo~XVky1b zDq(j%xV{1?kW|f^4^p<1xVEO-Sv{-Ux#3Y(EPu9C)jKJy-Wf;gooF?k_l$aHzCHHQ zY9plNNhwO6ajfKt){=eCD0wbqoHc#|r#@tzE!Ld+K3H){v*?7Yrq&HZX$HPCO{#Tb z>*Vgvb+@feSq5*q^fZ}92HFwHY1C;1x5ebPS`C0%^X02IB@zo;{r-qgE4s{_Rpf+F z(0|^z=9>P_>+U?d_hp@=#Ji&oKq}Q)X$$9Rdsfw1X<2Ng{VUSij9cKP!j_-ZO3PNQ zw135FpK)ZRWoH=o@2&Yc(6aZTz1^2BZxOXLW#HJt*zSRvL*Id#m3GgJL*K@cyg||Y z7#o-G>0bSXS%*HbbIBLCx0&2t&R}v|0)GzAe1AS(<1ejfM*e*Mm=A4#GFT;E@&z5~ ziG70|mn>;vNjf)C_2>IQrJ_w3|81Mae})?V``bwiw$MUCE4Xecv+N@w;K{0$mMyX6 zy4n{)f#>I2I^U~LWs;t&CjOJOXWyLh>U)kh`BLsc*26t#G$OCQ)!=?`IO?gqR(}ib z4t#3CWi@)74w3v{T7$0Y*!Mdh`SRIOh0Au*9K?b9Yr3E58623K(fu@(bg9n2B2c$P zp37Pv0R?dzJ%oaP#ZYhwvenMzr%#Or)w3EP?BUn7b8(u}s3_#)$3%O#4qn8^PxN?0 zGV<~R-`(}yJuQOO%Ib|ylVB6HR)44L@7+1lxvs;b!j68t&#RNL&GsIW1fX%#=iypG z@$@6r)YC-Or|kpkAf#Iz3R70i;$ZgvQ2 z?4x!68o`Ntv~F37eYC#&&sQA!QPoGQePeHM)<+9c!bl_liz#877os*)3sIXyu|Gq| zUYZ1T!DmqnViSrwWkTjmlYflPo*BGR?h)Ro zhCdQ!1C1u?;@moFASLv1AO&`ost%*5sGp}V{n6oT?%$Tklz()?wSVyVBhHa%Uzl08 zv%})<-a1rV))AF;P-~d-T*y7tF5RodR z>ttYQOqc?i8azST5GCl;>BMv*U&|d|H6Vkvvw?;cRY}*WGW-fP`i@zoav^2O-`!P? 
zdi8Fm>MAE7fTmIbl<@9vcOX|e#ZwNWwC8Z27-CHz;RZ1m2!FdlX{zS3QUVq<8Z6kR zZ}H+dRAh1n&Q*J)<#F;5!B$DgqO1}ukBI(BVIDHgD@deL%@cw|@!}+KC9g3S_mAlvyNpvY6UgMC~k`a7qknLLF9h2eZ;NV;$e+jGiD;!t-RVdrseD8_zU34KYr4d7v+ z43DZu#-{+IIJ_8+-1gFp?*+!DD4e>~GV6DNLO8SmmVf%RP<1w*+y57zLQ<_Ywl3RK zs`n`bm#@5ITeI2a;jmMo(;I?lby==?MX3(0u56B?-6v7(RLH~Sfe~^_YT`--t!f&_ zkgrAtB9IrWXcgdHho#m~3WSp&(g1=H5DbHm6NDWgfW>D>1|cg5TR_+h!X^M=1Z0vR z4T36=aDMIfI=2p1mQE;P7ZkCSf_v zV1Im+rv$t4K!o1V`kohMaikmO0sFO8+?N2t%gX4SUi(sYpjNv9^}~#3W*_OyyNnbV7QNkk5sF1xpH`;3F;EZA2Z zG$CGAJTxI*ehy>(&2Xe0!&v{_s!SH4jDKZBA6Os?RgCbKA_Otq5S3t+@c5sYSPc-sY788lGw;)TXb-6+Tes5ceu}gflCsq%I!{{!Vy&{S<72 zO|Em6Ayrs{doZEI#;k?NRP?hSyD|ySCItQn$3~jrbXC{J*_8>eQ&uKXxHL2Blz)|p z;@t=-{f|V|qIib7Pg$A#D;^=nSPH~xl>a?Ofp{2%8PJD>l^2nu4^D}O+HB~$CJS1! z!`Vx+R3Z!DvKvUa5+qO0;*aq6B z$V7=thd`+GuGgz+dM}O^MOtN1q(T=70PvG%3dMo+(#F1~^vZIX^B*|m9*g2Y) zc(k9zlWE&_>-c&8ZjEi5`a((6S3~49`Fr|bNsWqz$nYDmBtSk7uB872OMlc#$td{( zxgFQhC?(yn)k<8VRZ2qS^W-V|HCUokO8Q})3HtZ2gi%UX!;-J#R;*HzgRM5;I{KO4 znkMhXbqq>LA1t{EmvE>AukT{y@5u$&LvJQt)JYsK>I8}x^(=oN@GO1ZdGVs2E%OKH zz_Vp|tF|NxoxdcSqqZe~AAebT{p7N1$HLJi=(srY6??j`A=e)_Ix>BUTwg+Za?M>k zTN=0Dy?)i*J6oH!-@SEwPp>O7w4)myo#CM!IC{{z47idUuI-EKPbO!ooHPZfs3lW3yl0)BRm5r235SebAOZqh=*X5E=* z%H0QO%TH$*PMI(3@_EhT*QWXKwW-cyY6iQi5p*S2CKg{YkY3R2McPwFulDA8V+)qb z*`dskmO%R z#P()a+AF9zxPO8%>D4`LsV1YunF|DOa`xM_}k&hCD^~&9kY4 zhiW?28<9<2PCcWbwyT+IFJ@Dhh14_Ux~lU4-Y47pLgmR*dc%ILfCquP@e#JnbyD_X+sZefKT4RfKr6b)b*_0T}hYNOzH(J0I5aX&iL*);~ zlsvGb-+!MiZR!nZIgw%D@)yqC8eZafe^b<(@o-wn0v1|LTF%P3ybqJW)3yxF->V@F zz79uDiYQez=*J_?Q{YDrGqTL2r@*n2m0)lx@wW@0a5NsL{ZESr+J90}?thvR(Z;9PmUU!J@iA z|5&+ddjhomEz3K>)U7ho%5;ogn#bY-E^e)!yh4lNIeK!)6+Dw^*&m+;1~K=at#)c^Zq-{i2n?uK6sd|rW|VkX7-R#b$fYd4H?WTxX1 z>|HSv07ej|YQ@HF;9`T$^BV>R^zI4-hJb~*uN_!7+Z?z|j&c5{*=N2tb`ud1KNDPX z;`o~kIi8i{Pi?ckos8Z}yTz}_;%z&@Q5%Krx9SZa_h<>t8W@f+`Ols!U*F8mP598% zYTvrX%Ej?FmwaDN(;{*_n+Nzocx;3q{ZySs^7j5|7M2vIS3C*)tw%dxm#ryt3+h7& z9xISVw(kP#cXFv+y#J2X0~GnP2xoMmqo~amM0rTudCIy1?AY1ZQ#E2v5(8?_A#MDL 
zOr9%-y}w__p8q@gw4Oxmy&v1FtYl?oZY~oS!Q7Y-t?kUguO)tt5kR!}?+Roiuf%Ot zoDH^f2Za|B6l(46bu9YOj8u%vBou{XeQ+TD24Vi*mKDkB4TXIEh*23!->)7K?)qJi zbbI_1H|QmXQD*O)f4W)%=yc!b1O>wQrv^EW+sruh4xa$qr;37Uv~fP)5dV3&auxn3 zr1s=4ODxA^3iLe-2>d$217t6fSb{>wUF$A!E zNvO`&%2P6SbfwkS^KVVUherPj1{?+;kY(;P_E-Rlx$Sm$LeJ*qpXuOAH643(k40Ns z*U}L~%s0s|SH{*i0^QDH))z!j)DL&R+62*&ke&s{$@i3Bj5sH#<@+;MmpJ8bbfiR2 zdcRKY{BNYjc~XFOg!lG)Q1$bg4LoQ=(YE;@7YE(1mm{t(h`+a5NAVaq_Aq zB<*RWvo$ff#kytLSdyw_F(X-ZG}k~(typnnTz;iIqC$tCP1DBv%jqMi=e!!B$z=oe ztW|759U{3k(&A=g!gue6`9~fYTawyfN?80oE zxrv#35sv^5u0@4r`_xg^KxAhX2c4DV-l&!u$^_ZdbNle`Tz42hm65lU@sB9Dg@N?$ z+pi2?mu&;+XU6Vy=*gT_@FU!`m0>V3t6{+4@Viot&*32XV$a3kg{! zO~>N{Wk(|dr?2BY?XB>qRNY(VRV1NdDyoHXyNvS%R)@!+Y}xYSmP}!J?+u zBgHGj#JioYDO0XCmD~x%|HUg~Kkrq_;vnH8=d?&-c@KGi`B8E?(>;dnA4u$@?!88% zMG*kXr;Zmje>x#Ze_9KC5AAEb<|~SSC-^2Dq!N-a>(5v14>bzfW`X39rFyWM+C<)K z1$jBJy7Z2P)FR44Rg{vr$qt*|40)TZ^JDOBBIwL%;pZl=Wx}hb;)l-AJ|Eb&I7V+1pbX#_^T~LM54GmRg)QIQG=!3AGyP*FvmxI5u?fd}#z!=dL;0+3>pP=zi z&so7IaMrBJJCg5nz%WmpbcP3MnL-KR7`lm*<}R2682p<>IzoN0V8DWMe-w>_Lco)H z5-)7ph1`D24`&M8M`jWWO8276Xs<=MxZi$@pJMvlCAVTO>}(?;f?pY|FY2}Q_1OtU zO7(fpSSk@)EtUkv5LtboHCp-{NbG7SU20{tZYMZAx$=2=ES$-3x9w1kc8WyQ`wqf8 zZZC@1YH46VBpMXYmu-c7)|uV0$C%;kMgbfs?324-DGw0GMPt&oa=)Q-POFoY0>&kf zuu_so7urL2acd_=EIKJMDSx|pw4y48X{ut0#}bQD{BnUN(KKK|KPy1=UwCYp>vof7 z^76-=gQSzveiKCilND+qPcE5SD$DZuN__y-jdODEaVGbejSIKsMY#_~VJuUq+n(RV)m%0oA z3z%I?n-c>bcM0gedUK|99Xqk?&OB2}Zggy4FNwb?`FuTdsa{pDjkv3hk%k8f-Ly&w z6wIVYKC%-g5#w#0(z=%PERx*&s+hiE`-7 z{xgXbOVLrS2j=1hqA~?fixsA4p#N(XO?7u{FxUX7$WF%8`pScrXU)0J7%)Wd+?xz_UZ9CJowvpmceDw{ycCvnS} z{7ip!jwPq{wUf*BU(JovHk~J$g3sn~V@uh_7W{JbW>d5J;5j1MNiz!rI1e9Lx?Z&C zU$tc5i*APG0@ZwG9iItPP8QXCtgscuUsR>z`$n{kZs^nLgcgmWiq_4%*`W&s{@c8al_l(15h3td_Cs__HcMn8 zqPM}AHU4B`8CFys;ld^?^;ueqcGQn3ny6uYU{Tf{u~#&TNs*d{D}MS94ZetPUHC0P zh*qmGtW#{1VnOxXINCJ0ql-s-?Kw@F2gcQgn~sK&c9ZB!f_A`H!H2*K0ReZ~Mti4q ztM3x;6R-7Ik$HmMm6`|ZUyRgspwm;w_>5Y`FPE+Bpf`>^`^B_pAJ!;$B*f25AWAjb|IhX_|wI9+!^0N6pt37EyH$!Yg_4Z 
z0S|P^baKHA|-Kw)J{#@<9`liA9AIeA1MMxo~{mS#~tT^fh(nT_BNO=$cN+) z8)63I%hF@UNo!Kf3^O&eJt@Tm+LD~dnJFb>vpG@nwokN~Xv%ncae}JyE@p>~IhU;s zN%=auck-#=!7!9?1&+w11 zfd2QHyLfu!o}!|BZ;mfjGbC0x-NiDI*kc~dj6}mtZG;OInV;x;N+y4Rf1r{eWgQn# zsl3;XP}Kk2VR+p;qe_-&syY4{(3S)54#;5|@xUW3yfiOfCHlXk*SG`$3m5YGj$ud2$N+cS(prnJ~)+hfFUWQo~Xy!axfR? z-Buq5MLr+Pm~&Y#{Ve*D#jA7Bc89Z3S3H__SE1)MUmFC&V=h7hGNmv&#FJ9Dd?dUh zE6m0FFPAA2l{VWXEc}MLi^a?SO`#t3dXa1>+*A|=H6TwxGSiE9qFaZ)_a58t5=hCv z3-6?0?LQbmuI1zTy22ATX)XGP-}~;p>G4jRLttSc*XslavxNvw#n&1Kp3@jej*Aegjb={k`7K^-%K941lbUOkdE7<<0y< zNG8Ygh}LpoxVOLEq)an8URcVB>7v|3iu{@E(uq&YOT{u1$VmdXTDp`p zD999~yL4SpU&ByP6I=Dm4`&-f?49!lqs7UP>9?_=Hl=|ak4oYB|k6U8rmS3s`PnicsJRlZ8t9q#A6L-b$|?HSmgUWtgN8T@=(XOa8H-(o&d;e!)Uz90?t zL=AsKBp4JbRcdWTX-#ZP8>P)G(H*mDN}LZklChTp2Wi@s>pWY{#FVcSn|%&1;*2mF zumT~0$zir9hrq=dx*vgNS6#0&Qv3b!c(pEcpc<$Mq(5=1igbB{3?*sfI*$dNuh#te zzalJhO&?qT-WW&7tVV#9)I;%uYKS0M1q^}yf#Rpwb<7zNf`eDY1tGv(>i754&f3Zh zmkdTT-GQ5Om4|iayP3N~LI=QP_#Ith;(;;&j~KI{VuFQekOOnUWnBuYZLs-rn(`Hp z%y2eW0wmaxR5emuB&WafQB{wt7R-6%pGDO-sL zejykleF}T0&In4AauFu$K+fi7(*oBVLJdiCd{fz_+zsR}#KG6?^|A-UV=7YaxifRY z5~+C$N~rr-Y1Sj_OrMjcW7}3Va*HT@sh$!<0`Jcksj13kYHg5GU?#{s0#NQ9CNhw* zE!H9cvZK-+b%$~0pOBV&;Ak* zW4re?J)r{$R_1Pu@uV*mb{fApO^+a{A;iZ4wa^6pYjR3>%nh8j=n7;db0lRFdbDUv zGzhpN2nv-;PBfXQtn(`escanY1)=w>Ri>c_-5Rz?etXAad$m2S6$UZH2#DV_y}5HP zG^Tj9^^wTDtXo0W8&@@A3k_Dn+&8IemV8`Kl5}5!f3j=W<>;972&@r~%;k?nf0cpk zhzk>lq0ehh`HhqM?!{W|!Aki$KuKCu#!p0lG!v^XoiU;@z6E4kcz( zA?R#LRzI?pkIgk@E(5{r6?6%Kkh(s6rIU?UM>Z{d6uXa9=ip3YWVGk}yjn?OflebW zI`uK&pxWjQf_V$N6#YlcWs*3R^w$hzES8}dF1N_-rT}jm`pA5fzC-7UP*B1THlWx| z3nJ4$^KSkZo#sf~tAiQ2KCPF(2z#F-vKVu(_-`ihHF8R|ZV+tO#5xqfRc!5)0t2*p z1|%CTM3Y9FC@K-eat|4-rG+Nj6x3ZC$5(~~jnYLX>yFEci}y`PZ$E`kNEcd&zz1s` zu>qMPV2Ij_lYweHX>i-=nz zbH!vGCCq3iS+0=w@mU`b{qSBaJ#TymMIUx3o!ItR`+{(@)6CAJca*Kp<7_p@Z?^e3 z^g0yak=Dxo&}SM>u2}muk`rU{l9|Z`Fje$%Ug7en=3&PgQ@AG9^0i_}%kF}kZ@^`b z7gxvqjeao*RT*Sbi7c`QJzji&Ds7SApH~O*UA^iC8`QjLHs1%-fWpZ!jTN*(#re3` 
z>72G~599Ek=1d&7;7Y`IKb0q{)c%E1fO>3KiTh~-(-c=&QMn7a=Al*cBQFWem0AAc z4y5|mD>#Z0RD-GBZW2x^iKsVBg~#*f2@l$2f6OY69DgpIBa3OB7w2kw?ug|`6LAx& z#>2nilG$ey^z027ofu>vlcL->jWC~PV9I&_nF1LxR-oSL-3yY-iU2sQJ9jZjvl5E8 zckDx6J82y>9oIVkAOf!UZugEVzJ0jzDQ4Q}KZBJJ;FUyR4cV}nBCPdU=&26`=wvi_ z;ddr+H7;&ejqe>kf5xvGu@o6wNngw&Q>@spdmVAp!(9Tkx`b{p-lO-N6a~x7-EQqo zt3~T8YwI~&LmDD8^@{ZL^x^QLsG^q-+ukM-Z}y?+X9xgh+P6$3ZJy(83lNaq3v57%FpO4O zt^j=+bh_{PYmdnm85o0we$AyvzwKA6IsriueXscV_9u3uqq8qeSD~Otk)+E z-9Q5}+Ja%xFed0LN7z}f@n=|VIgd8H*HqJc4f+B61>6k}%Rx3mr0 z>+4-Rcd+S~aS@OxD3o!cY777ZS^JCU@2MxKLWnCEYL|CijeeQ>>5Rb=s(Kk5AnIH( zl!FZ>%Xj6l`6EXa(Ahs_&RnDXTa|t!ZF>N^r75YAq2DIjl6(+|CDEY#O_(W(6vM+S z!}}=Hled_901#bVSl+ozoAq?p<%+eXytG8<{hHM`Z1b5+Nlxt9z5@Tq?9fZ3Z?$jt zHQOs7W!ao}%Iq>3yc3^Xu1X`}@OgCH5$s^YU0Yva)_G`i0iWNt|GTJB*D<%WMllK$ z6*et*>1t!f!n3_Ty&`2M4rV4EBB`0qiX&u9LnT1sO}e(Y#Sb3ChRtAJb{54fZdzO= z)tenXv0Xm#=8@}2-SqNuua{mJ%(sbT59`!y?UpZB z5yKG4E>BTEG{o{fDS;(|17CA6)1m^hxl2cmV>47+*^ThK_&8p)KuUX*{K2-9)#^U& z=8Z5#5xO3_Psg6IoS3{lth_Y`o0#m9Q^TGj<|`K`u)px}(P&-2vbTNnUT#@$9X#@* z**;{E>OWR07#~as9S&PLT%bpe{n3L;=&09jWEsoH+tlvX|D9`s72hWzqUHcDYH*kG z#9UN=tJzVXA2;uioozy%G|!Dr+2U#Fz9cBru55YLn$6WAmsL}xx@M>NZZXJE(I9@> zCM26B7H#LNST|cLN?GYKQ00(SFBXga4(Fdr)W}u)P~Z8oxRKq>tXSx|Nf0=L+FD(o zR9P31a)h_fHj z{oapaQymUb6sIfHB4oJvHPg-|EoSeN;(Wx zEsY01+fjO6)2nNE7^xaQR!P!+X(kh`$H;i#GD-S3O}dU?$ZU3&dOQ!1$>lg9%iUY} zlA4WdG&UEf-c4iv{b98UQkg}$TzOt*EsbUx!a&HrVY>~GgKzt4OO>K8d8FL6R|q+y?E|_zRcu7Gxk(}%9=5`` zysW=Z70k;}I*+vZWJ)IRE*aIBPIT|CE<%Ti#fM6c4rE)+@6$|H^{ilG$u-QudYQsa z45KSKT=q8semgh)3JNMdVLI0j_t&8@78X-7lhG*ku@Q;#iwW!>$d(wBq=IgXpj4S0 z2W)n{yn{+xo+|tUITqXbhhxN%HGtpIh=RiwY-{yP&Ii;(2p`HoLMBZFy0*il@um9O zIm4{tjZ#PTGPU2;sOY1V!GE|yD))}dej5li!G0OCEE-T{=!^eVTNm8&)B|zlJ!?`k z`D>jwS&T(|MA5k~oDv|y#alktMZ)7T;^}!p$P}9^$klFVo=ofP4iI}zgW>%_Wno2< z5qr~-vYn%!n3;zF{8P73@0>-Ym#En=MiLvIPdcP($v?e%OWi>-NG%##s&Y@W)l;Oc zbyGxPJyc}ep2Nei@`Gn2ZIak@kjPsxC#loLcyFRA%`?b@Bezs(98>0OH8>qMUDnmv zbztJs%N!sl>fo$1Uc@4;{U?EyT5_(nM4d5hWTgfBthEUXC^lh+s_f)blT>P*?E!u(6L(um&?sY 
z1`eNAhY_s`IHJOvPxn&oKE8*^-5DxiLFDHm-@B*t^jdMAjoz1gpJG08Q&oPhm1o%G z)Kky6u4Q0HqhrHRWM7GAYm2ok&$XI}bv80OJC?p+lV%|LYn|s;Q+fMlR{rb)mb^TB z(R5Cx{QGC!&W-gOl^HZr)k(`L!1pb_L3M+bw&U0c@-@ienoNF|1h z_LP~vi3Tk3s9eQ#MlXYag^I(+z<|0oiv=Q=CZE7B7ta|LKiWjQ)dd}#k~mSGnBceM zq+vI}VsNlrp@wEN%6`5nlVPDEUMj04*fa46y4>ZxdlJXn-6q_7z-W+o`25*K5 zt1{Fc5Slwd6;kbsv1;D@9CZR@sIQkSm9{ylR=Mj8k>H zhWT(C^+88a)BEAKFLu_R7LmBWaMCwXdbELS@X67p8}ON_{7~{cn}Dr7&id<<>Saa# zR`4&RNYjVQ&>^!UL^xwFSU1@kX+jgspV=BIpdqBxWALUcrYltZK38gj%Y=})P%l9_ z_tKBRQ+Sc{P7`bPq$l`b&j0JxeDU-ZlU%=S+I`-I| zf$x}S1|o?LNJ;~Lg3)B(o8Hl0_-xzhS6RNs3LjZJ7I=+mRWWwr zkM->l_ckZlyV!~ei<$<$sh#?~bE598W4a}o-3-sx9U#}OQWF2VR3l~+^b=D0!fcZ_O zu9j!BGB>ooWe-2k?m_7y>1}C!S^=UjnwVFUr{+tOf|Yj21uKs$h;9OId>7j=^FYpOsrSyLE;9S!L3*SrZlxc!v1PH9 zibF|yxf;2M5TrVW5NbpPC?ParKvKB*uak$&*PW^<&<@=BidA(!4MOoIoks7I*b$h; zc8i6K<^McL3a1<{f>H40*}?R`%X)9kFjq9!2cTa1F?twHvf<-@_bV~Wm2Ma+Dbj2 zyBj$~&hQBzy4NQe1cmj!X;&__dVeMYjq+TC}l-ombS{CUtL6YAcVS3*D)|#TVgQ+ByZwSn`)yK z;gg$*nZ#P(Q$xG0!-)PYrL~A!q-=`Fmd;#F|4;Y5OwmiZ^@coW6UzlEcPjh;(_Piw zdNN4@t|q@2~Dh6goyn`CJ5%}X|u67Tvn(T_sv`K{V%vHdvu!x43mWT4rgCXj}d zlbM(0n)|Y#pQ>)R;5M&rZ9r$3D{yq8nN5m${R;_-w1p)&mO^w1)WH%ue>&iA&m&b^ zRKY?UD(s#GHt!Q<8CvFKMEgN+?M#+5c>>XaQcj>hpx-i#h$#l;s?j}Df7U`u%-i9g zp8P#14|T>LG3u=nlwU<6XvTSsW)6~&UxgcFLE}*Xr49424$@{Dq1R~zU+(xTh3}+Y zx5SO(*l${#gz74?T)*drWVfQ%wf&o?N{#+9qROtnHY+cFY;w;T`LP_)6}20?FW|Tv zm=}Vm;oS%4g%gcPUjO2pwp)p*fr1E2guWew4zLH0kg9-vj;mrib^AFP`LPQMh@NXP z&E8zaf~iF`$b^(dEjNgYxIF6%pVCr=-X0G6FdH(k4n)O>wOM*JtM3yfe zu2-Z^f16=KgFNQf)3JByMlRES)7|L zON;FXN`)qm8eSaa23ECQH-)z=P-LoDxD$Ryq(F`APmJ47Ja%Jy z>1^@_iAZJSm|xf<3RNUEq6Yfk_>JTec-;)Gk&CC6F~()cMx{>W6x(`4lw8^H-@Ck5 z1MY4x{{p1y;d~R7606$^R+&W#^vRb+AMT`Ly+SZ4{5Dgg*k4$zYAev8QO%G)ODh2QfgVa+ft;+-T7j4!cXN5 zs1sgDulvty!R2*Fo=jqU0f$*y++4}2N4$Fk=KckMIVbcLf}tb z8QH%++AcJ;qOH*TkVR^Epx~&Th&`zbN-3e{*W@To`94xV8|axy=xX3KosLs?BdoiG zx&Xq8Y^A?&*C0wYKYvB@DjU^ii_-qb(ikrnE4qrrA|!pyAHwIDfza;QE}K9WvwVwP zl#>OBbX$qV88uQL@Fg-QE)tu2QH9bnW zsi)VDNTqupYFCs;*hAj!&*{}YyOd3bK`F(*apmIfkA*z 
z-OptR1+w`t9T|W}=yphiudPyW+wI#AzL?gOJoFT+t#G$`IBNCGoz(v3;Adv}+cs#+ zh^^Y7w3R{Av-4^^zyNwuKlSu&8j@B4_(uId;)g;{yrw-5VCoR-G6^EXGv*a;-K5Sg zGjulLHvOWh$_pb-fam@ZLgoIULlt4GWaowZE}ICydEicN#L18qhxs!dat-E?W;EFK z{H)#|bN=(k!lzvtH={F(NG9Ktw^#{(&mK%wNY9omx?lsstdwNWAsABvwRxGe@(=^T zsN+E?Rwp%B;Qjcjs@pRG9aIkAZ1&agzc^m~S*a1~ZT6n^>W7={cAtEcpe; z8neY9z=$ol1>y;;EWGrN8P81JEC^tgI-nW9(2uVXS% z`=vRCN+zCFi^}g?t{=CAxJo5eOZPRCnuyT;LXz5VS@f%zvvK3hnbWJCzjWisHM5Je zRCE;g(!FM3d2|1tQ?f?v(w0N>>A>&mXa`$ppyqW0Z$+n`M$ORTk=-|kmSyAgRC;y| z6PHdu>G->y1Mh1}^%0%CyC%G*ol9Ewu^V&8(Axg)?+pf`CPlZ%>A|(2BX*vxO%vV1 zEep;y9%IQ}=Yf)}(Y0evU7jL`uyOmNbpou)%=;Ftb`?_&O%pa+BHy`d1gGTI{{V!5aPvpoY+lDoPjynFtt>x2}e0e#&N_2_=392yYp*DMYy~Oe* zi<^C3yKStQKg*}hx;(AuoGm;V!t_S}baai22f?tM_^KRYW7xdgsVc`ej$6FBTQQAR zyXwN$3Xk}!shF$Q4g)!L7S=7E*7Zmhfb+{XZs4V(=fhz9S(8@~9wctU9b4CzyE=Ux zS=CGCtUf%MLHA^)K^AooKY5LQSfa03*^UH`OBmReVqGlVf-m-nAQM^(`5j7br-Vsz58wpkaNh)k}zH zq|Bv4dTX}QH2(5kiJGx1S8v@egH&p2#_4@_AJ^)A8A1lBd-wLNX@|thV*2Stn1E2N zfIr(7oL(7ONX*<$kev5T?RxI`z;B@VA7}b3d!u@$8bp(^$O246a?!N@W`oL<##Mf% zaHD;t)D5Uc3#0l05A?SAbBAWHZjzdM_(HS;R5?iJ1J}rCs-~bvl}i-p6wXG?$bovo zWt(|CXYgA^$;jzVv?6V2JDXo=SN16HpjX*Q;Yk>}bmNgE?r~0(_R@I5z^b4f0fW|Q zIWUz?kkNb^dTd>iVW`fZ3VAyF66vPS?%bCOiRNwpa|=DF=#`7saWrIsEtHQ z0`wY0%?FQ(45;I2!_+70>fjCukQIpf5UJ`0=wR&XqQoaT+VjuzF#^Qwpr*z~jOcG^ z7*4XHm97ZJXsGnchq*hwF{DTnM-SLHsbix)ojt7{E~Bw~Ybs^u|GGc7DURRoo4n^NN~-T2}4T(M{S$zd*j|HRQv+ zj_cISXzA-2s7`}Q7Ux%aLuwj~;ZA5c$wr&M=+qc_iJJ?n12HuX#&KooWuhRNEf^;0 z%g4D=Fr6cf^|g5jE+xjqP;~TX@)04GM*`_6s+#gxYjHY4heyy%|G_b8T2(QLvce0O z1ynMtY2e@6$RH%^iOZXdd#Quk?{qs2KVzQ87Hb(hiOO2hK8=Maza>! 
z(gMAylAftgv@RF)^GF?kauOw~>#pHk=F|q%+`JiRon2q?a}t*>T78TpB>9MhC>>oR z$&|FNrpjMRxa2t03qnh&x1@q3Pu4FWAHqyMf4G0D16VirE8x2k8*O}FfjhrIF+br1 z@SkS^oIks;y#-PJw=p+lgyoSn zPcfdiV};g$#j@Wha4V}_V+_qnVyw^?MMFlQ7{tD1tN|m?jAB2P->Las8K@j?xc;m8 z-E6#!lx}7Oy!$2Zn=#n9dq>3DICp zVM*0Pxe22O!5K+ACB`Xis|cpVoSZSE9$!hoDO$Z{jx|-#7u$r+ULV z#|FzYKX-q@K|g8WXpZUOrD3|&BfSOd8%LpOud&OTFjekH3N7T$BGTyS;@3|A7ibSL zc#jY0j$q#t|E>@2Z8zkzAPB$`WXI#dMhEoS1Y}38uM`|`4`L{|3#5kzRDu|??fwGq zxd7df_rIa`cjVgj!MGIwea?Xf)PnRR`rnZEc_7{DoAsKp`#TcsW+FXXfC~!j(nA8= z{AVFNmk~#)y=Y+!33g?{0F9uzpts53Jxl(Mgu54DfG@~rKhS3)Fn}U>Poe*fecu!4 z?i<*xFLI9Z!oS@{;F2rr?j!`@#lIHmHXSq<{Z=3Pxdp6e19XR|?}>TW2kW*Q?0Fju z@a@tp=+4UJ3Bq8GG1=OWG>AurLUZUef<8@zXB*I5$lG?%T;$s=uxCH8=W8ec^7o>8 zsbId@XHvnQ8$b<7cPl{wr`vPj&mEvU3Vp5Ew}cP?Nn_wKVc!$WE+6!B=qUs*!EH1& zpxob)D32EG+c!tXU0Z0tmj4Y_p9l4>56GpODKRtUn}bE6$qdg0?-TkAJ4x-G2#TM=pq52gY3xlrNTd_fb}H% z*AfD6uOI+EAm6d_K)CepPufht=7-7)FYM0#EZ5yalz0C{!?>3IX$ z@w4g%d5!_?5d!P^<9}oO+PYgvIfc^_nm)1KFX>=`x=RlNF!y)l*tLZOtbq4?K><8K zd%{3>e)-?1_dQYfc|hIz<C)JnC{n*#Nf=_nrqBgrBKE1qpX`;huFsh`mRhpm%)` zZV5pF)F3-N6)`b+=hkh=CD1FA8L&=@-KQVE_;CE3k9W4vLOL zA?^+{ijE`|9oTuwQ0xq=RhFLvOeh9vlUz0cT5FxNm3J4=(1MrwZB)>bIDh?rg_3Cu zD8>yxgU5$p;I|*>MC2kw5I&%a@1klKTOnXFX;CcaV25ylIj9FwuEE?v3M4am=;3UK ze!#4JTwqqbuihsv%S;hM@qlwlYD1FD0{rHO2)tBzQIZTKCL%lJ*iZYuV~q)`Vz4V# z7#6m&BvHpt6T6V%du2tQwsL%Z<}iM!*B7Y2yzF7zj%U|c+^8EU-niF4n-r+rp_1uz z%FjYRVFUx~TdObwiv(V((Y3R%RZV=|vR0W+V z5rUZVm0(f;7fQwzehI=sTv4ec1kF-Ck;#y}hkQ<;OnB(nm|rnOffS6aK&<92WFEE( zaR|u}iM42DL4hPDJCdO0e3Xowp%Nk-9Jq1KQ^+_?Gr|Y*iZG~*Y|#fAY<0qwzidHK zvJHz!J$MMr6{$5{Fbx;Yl(F>dPh?5jhF*R_%Hp=RlRy#DP|)CVkvK4C88FOW27Ryo zBA*{Xy-yyr3JQ%SG_*=I8OEzHedu5fQL|nd>2eYc@c9nVeatseg~}vu3?(%KCEC2s zxt|E8THzfP@LTwQAsbb6Fk!TabB3ojxEwIjP!GhX1PA`m&>enmLndcvg#6=t;QYCK zvps%w=m5435nPW_X}}?08N~dNcPswtT2N4p^ISTpDbHrZ0;uKmTjKUBmeDB| zMEomDhLf3U6^WrCjhh^JKU&8edF1>G|LH)TrZRT#3B)4wxH{RaN@@`CP|Q4fwpqvg&Q|n{j@7Y%hOP{NrrL=MU{tIc;kFF;_FtdP5EO( zD&?e9i8-gpd{*E5Py(^2iReITywz=v^7amfB)e};S*<~>f+#2eIa1t`=+*;eNET8G 
z_SuVr0Z(oUE{zD$brZ-Nr6Kt+@-%V-R9MSyDD;$Mp~K6>%*5P+V`yG!#kYc1igiR+ z;+m7l^@q&G)~Z;ghMlWUQ9aK?2p{qwyytg9#asemE|G^^-p`!L>pWy# zP*{3&;@AOm=6@pX!0bu%Puc|nc!&%O7a02aaX&8mGk1Vs)eHrVf+3E?HEk-Fq$nCq ztgwXr+=-508#S~KPYAJ zC<+xk%<6dr<*+@ZR<2XtL%vJ=0;eK`{7)3zkb|YHi}P>(>%DRR@W}o< zZw>@cr&W+K4TyhX?0ZdA4M3Bxp@5g(m7J#BCSpb&_sg-C9S6QDo~r(aHy>zk;EoU4 zDUK5HTx55c5Ar{_a272xeA1qu|CGa6{wbHqdxrt=Ad8g^qLB5T{p@^m<=jOLcWkc? zYieL8tuu&A@&?ry^xCJxfb&aPkq$jc_nEJ+eH7969A&MbQCqD&Pf%&Spg(@v?z}xS zw-4a($}McjD*KfUNX->5&P{|ry$bEUg`)P%XYK?KvrmtAf_j zH0B?7qg>c;$zgux*88H<PF4c@2VNM z=Ox5i2l}RYXuf8>i1CYO<4rASybDmnM6Q7+tAd3?rXC-cOrRW(sqEhr#{nyz#!ep> zbOa7o_H3g5u<;~B-QU*q^u|`S!-nLl(@6nrosGCU4I5LLp7wKNL!IaV^zxUqiNB^& z)SpHm-gez6sJVh7dBKyGCdguD*Bchz`B&-hwKGiZ2Osw@Be?jjLSFCj8}jtOzZyDq zyHjpbvJN`egn*Y8xEaPHGbJ?AM{iGp3&0y$pNw1N3;K91P*B7h{ulg(r7o3`Whpb$ z^o)v!dH}{b%}B+$Tn<)d;o603k>uT;m6D)_fpv2eFJa09gdZ86a&$EAKTU=O)#o#- z9KuW-e#d$j^%jeF;jQ2N;HjV_FTrhfQLcVOx$%NrArQ^Hx^F| zoUO&Dj|0)M6e2m33I1K*luwRp3Je3@goj_IOMVb(mz8%NZEox&FA4JB#MQs;oSE%Fj0xBtR1PIF0@~Aga|T^0L2^*=Xy{_@Y=!YF>JKO^hE9|6-K2001{VR?c`;uc)_r{igYkq6+8JA5}aK zw8wo=rx4d6Ie-oh>Nl*V22V$XJ_oa{UCx4Uw`-82*xi_3dq>L$Pf_MtQt>_=XSRK0 z)$dw$M@1A7p_=0uDElhqS>`X&R?K>6^brPkb8Wh{H-2-2?AdzOeovZvvl%_i_e)m1 zGC-AyAKfcs*u}H^mSkaNqYCicL#Hk4^QC^1`WpLK$eMwIxc1FOqj&S+`n;SBrtTwM zQ(kN*X6CcwGl%H$i{%NPg3H(+e?vG{1lU4!KJz;-`!o}~RoMgu-%t!AanLA_`0ctj z+B&Ud18zyZ8aD*@x#;yS?5x=3p9vuq@qkuPEC{Fw{hd&dHw=P4Sh>aER6Pb_3g1wT zo?t_=M=y&>5#z|1n3(yNx{g|!jY-?M8B^=mg5bS!)R)NuE4#1Lb@T1+oYZSC7X{(; znopKNpJJ|RpRVg3MEhJ@m0T}ie~n`i@cpm_bKbIbNAG#pt!EqmO@Z^>Z@tS}0N5Vp zD68msv8Ib`;m0TY_&2n_SwJ6JvALAW`s;PR)`Fm?t?+A?G1st3q9@mPN0QpX#|N^O zMX>qlMCE9MCAU=Q?f85h-ot$R?(dIYgWJy6!;K$yIVJe2mW1~yMem((*H_TnXKgQ0 znks*X7$w~ue#q{8T0bC)Q-3ZK`vA9_OFsXJ8E#IiuY5IUdw=HKXIBb-y8dw+e(Gs@ zYJ|>-pB%~E_ISS}HsF6cb8=w zCOHz4`%?AZ?feaf9ru{jQ+i2BtSDu*rjvCVwvfc?j28Gxu}SE^FUlx_ItJ2bD;R<8 ztY_F+B(6;0u6Hpiv`%jv)%x6KOk0}Y{_<3LVwL7Xcl%noxZA1p|DxVhnO_3zj+Nge ztBwC~7kH4jpDcZ$HUwIGy#zfq*E%wKzD|3!3i>$?wC?D13Pt&tzCFEo&?)FT`pD}x 
zo`qasmb$VLGfzbU@2-oHOo8JaI4`-ey?^(YgKgcry^RJJMv0ZP_?Pb=JKFSlTDv)Y z=TC@{r(dMEBBU#$Po8GyvRB`yev1e35#@RnK6$VaPcrgzC|U7itf+n54V_RCmRnc- zLgplv+dqEy`uFj)^;u|G`fMOj|AkFl4?X309V_U4dMf`%ot|?{s27OiH^N2LXg2jR zt6B5@`YH6VQ^j=c>GNwoVl2qF^vTcpeqDP(9nm*-_PgL^F~Sy3Av#vU$)cClp~dlP zdBJCyGo8KwrvQq2aH)-$C{J$d%v0sg~b?gkL}#rs!yip?l@;Vu^Qn z7i%v}b!#l!#PaFurDjiEuWlgE;H>o6E|oGR#%rbC>Ait%85|IB5hB>X1ldiuml&z> zb;9MgStM)2#O>MjWT)?S?j~u%< z$4S4C1K_+H;{1h+H9Hp2lz;IU6PwoB^L}|)X%34n=h3Sr}^%oW1)6`z69- zD`~l<*tfqyXj`cE`N&SS^8LGZ_ol2QK_DhE7_CofK&41b20}dE=M0975^Nze?;+g3 z4@}}GioR4}oS%4kRcLilVXvwn2kCl3p(|!4xFr@U%1LyAyhy{`i~-|o=;1eQY~?3> zt=z*2;0z(iwlmF=Nxqy-1*J4peCi`u{D=0szY30!(FqG(VNL`6gag9T7}fPf-cP(VQ; ziJ*XV1O&thzA{hzt$DN7O#V69-`#tE=R0@rbJkrex%^DUiA2}i`~kvDGG+RLusy%z zC-=!#O;^lG0`z^+`7>!pqq+{v*dI`WkI)ffhf}OfF$Y3YG=f*Jko}NE?}(7-!V4Ow z0vBXq;e|x6g(=Td7LxX1XC7ndWT1fN={(2Fir z6giMFU&@x6ue&9uCr56L{k6xcbm!OUq?OA&+s_|E3aTP7cu)4oEtPK%TLtcI zx!f2GI@^f6!I|j8d4%C_zR%-ejLfky#vZ@+VfvW;T9-ap`A49HaZ!_=Vw1mb17|M# z2^*kE1&Hk0k*WNA75El@XPU6MfKnIH5j8#c95aGqy*{+!jkns3`$sD-JTGxg#e|ya zzpOg_)EXvIadu8@O#g))>2uztA3G|J9Bg?Q`9r}@Io74P@6+`GD^W^&*ZX-4iQ}~| z_zYCwth9Ew8Q%+woFoNaH0dJHKJAt6UAU#kRA?u_mIcvI)nU{y|0 zeYdx5b<&<_BmIFmJRs5g4LV83B-3PSoqiQ+N9% z-tI2HooMRT=wZA4qln?WSH!&|v$;IYhXd{${C?Ha`FY`Eqlg4vu5Uuga)M4b$(MJJ zh;|E=_2rqN@8E=_2~RJj8(a3qY7~{a0-26i)QB$z~>~B4HUT>!)XyC zC7k%ISS)&*=*}%g->lyv^w}!2wJXQ2&8j7V=ZXsU6~w9O3Kd-48MD58C+2O?7n+VW z%+|J<{U%qWq{PUZ9YXs%pB{*rvy*u*ltLOzS!)dU8D&iE#EoZ$8E^AmnOzLl%;7Z4 zH2YM_@nK0T*6fQc#}Kf`Z^`ZuX@6ipAfH?C-8N&!#}h0RJDs&Gzs_YMdpM}YQTdxA z7l*Ork07I!mZn8y$|u5=@+c^NdpT7GqGxf>e|y*dTl(YX8HRj-O>oKlif;Qvms!<_ zqgGB%l3A$CtC{eD?H^U=UwGAO(cBJCwH_~;$e~GeOXV!m0!M(w!PBt1KF4}YM5$1= zYVkQ@(3cM8tW80yeGnm+Iw$#tS9W^$=$oc}*Lc5LE}Gst!ecX#D6X$2OSZi*R$-xT zi4F*rB8r8bf$s`=U_V;n_+i#besBG1*R5+EmXR76m%ol*50jd3Y(7}SKP!3^&K&gP zS07J34KbIEkRZAP9hZXc=#@YXuD74NeL`LTkxj9328X#oGc(z?e||5{TI-pLt*=U* zi|ojW(pkX;joc`cNTb{4HK@eOV+U?6McjMDEnc{~AF`4kaOKwE=u8-VoYJ>TAcJ%# 
zXtYetHFF`T({0J@hY$SVSLHX6t`NxbP}*LgWSVelRsV_LGO)BY-&Rej-~m||_PV3} zu)x3*!_4mfqDpY@xm{1+(aygzzTwWUFCp@N+1)GocygSXXkk7jJ~oh$5HEjB=>tqA zp6R5H>ZavmOFn%D3?gxTB#W|=H>aj-QvGZZ_tSgIuXToSUQ;sgcq~Kg$aKkRpSv4s zvuLI_6$GdwUjlVa3Z5fZVYMeY)4gi@k{k0QqQv6#Cf!7&M8f%4_duYF$G2aW?zAna zdy*1br^viSOg9th`|>W@RO$enB z8t+Cg5Vp{-O6=58*IJdOmYQG8WVmW~0XOV0pp;PGqGGJRaD9spcmHf60%5fNO_IPMfGByX1O^Gid~m> z{=Ty(DQ;w@|43)8Ok5)>#`$HLl=yCa73(>^M9Fa_e#EZh{`bdg1BT@J3)j-XVNv#Y zK0j!ETF7u+PZU-`&F!&$#2Y-kvIrg7>zdC?0($VfmCYNrkfX-hT^BTq4xF+Sv%P@m zi!6)dD3v;qG3$3+qtP|HvU22L(hFI>3BcjDv|YNucSnvN74YxM&5foruZ4LUAYF~3 ziLkmFw+xxviWx)JP0fqWwK-}ZPx)=1e`0`rYl_OfUv+}sAX)SEDy{TGm|OIhOxHH% zmt0`7C4I#9q^H!3Gi{GR@VAPwHuaFj4vMX{o&B4$r^_Gpl}iUa@?gFkef*(k+&?VQ zKK5bk{W;@fQ=ba0(B#g>7btNG>}Z=6}88)L$+Rc+Y}W#}ru zJ$A^YI4YX)I5(?*yvXzkV(zo*SmeG*vBG1wfQCyxhXT{;YNk%ZDR=iKV-AMy4+_2` z)J=aUyKO1?3F_5(G6+Zu$z13RB}5j~?WA0L5#~{WjMvaju*;!3?!KdBcRbEo%t1u^ zt;JA}5c2?L}Ivu2lIRl9ivW!^DEM386rXF2n6# z2QU-q{RP`qa=qG~gWj&-|ikHGSFzTPYqv6n;otQeCVlrmfabUs-gQF~<4-HM_r&HWdu6>bi|GWR3SPG0nsU=kW7x2DIDzjhc# z`bIn&TBVwtkQ05!B~o-yc_&GAvM^P0Y`f*_G02OaN%vTjQ@DJ1r_IX`M)bT7kMA&m zPdez@SYjhAPh5{)y_@HusuHW$LlL%O|% zR0^8sH1OGWb%*_xJfkp5(WvEXZi%ODpkhsMOLD~G+iYWr@FoAXt<|NNP?fMb)x=7Z zdoS$NwlusHpq)|6Tl}Udc6O!f42}b21vEV;6#K|j?qIxcTVzJPr>-C6m-E)b=_L;d zv>e*@;*+=Hftu5It~nwOz&=AtgH-LTyE`1`wf4FZ#LqVK@i^uySb$TuP6EzQu$ zV*eeuEAeHov-g`A%v|pzJe`S5IN$TO6jhYtmuK8t;ZL0Y-aFR&IY(ql+>pt7ksjc? 
zRW#`;bO$rpUU9zf^g@Jac!*~z&r3EUs^o%*>xgvpIYX+xWBq8G(8zah`IQg#_3Ia9 zI-C)P6AqMy@=iSYXf6M4?7P=&c49j+*b^WzV33HWH)m0Yyd3jS(H-Tjki_w~EGB@op<^PH}-0}471 zUs9^pkGM(z>0DHb7Ac90xVLLT)bRyl#pj7ypIZ8-v)2a(u5sD+lDg2VbE1=4nDwsM z*b#a}O&jOAaw0W}oHEt9bJR`AX1nUS2#3TSZ=0dMk|T^asvOyK&fTs8hr`m~x=YZq zXN3?ii<*`Bt|Qfl;yrlk!;Bi+w<5ZX=KO{F^(Gy8Cti9!iBb<8?v*2zCahNleQtup zKG?N=GWFa6!n^Aa$4~6NSX#9z?Om+jPkx!gCJlmv$-a&tbaj~{$d3&Khmd8RK}0q% zIoBC<3j~K&8@Yfm?1B&-2-*&U+69L0f}_X@IFQaJr~wDc-hnhX0h=OWP@O$!FjNPQ zhQV}9(GU|IFcP%~h6W>G2&fr3$Q`8lzw)eZbO#;e0l{Hpem{^72o5Lj@&hSsj95RA znkfuwiZlTu&B4$;EFw61gXy0{Mh5@pL2mK`DFdu&85s%kg@gus-~t5si1~7~`K{3F-*HW;nfL6#Y*U=6kAk9h(xcR0t%3@S0XroxwO85opJuFuz9#fWip0eGc`6*WL zj=Nx4)1_|7=0!q!#XJnMxn9G=gR&k=7HNAS7v!LznjD{|q)zwH0cU{5WKr}6Y}fzf42_)ALCi!EdqR4 zDtYo0xsd=mM(!kl)PTO_ommA0%6mCnaKdcQFeFld_9*8%FIY|-XKERj3XxT7!&;_l zfDt#T5AwKra00BJ%#(-V_CAZd2$A<}Q^->c!5zwIsmtS@#~W#Q#1*$ql-~ufeE9HkXFN--}Ddy0oiB-IVvnq?VsuTixX{WK$@@`T734)cpNSaW; z!eyL=e{6ZBB&#p`l?GUqo}gF>GfH)?T({v-QhI;*?2pt2kGZRAKWcig8wx z30fq1*o~x=%m9Kz=l!b>&h&;j!S%bJj zI>aEYeKy1(Rec6tKqbub8>G9^$wZ+>M;c+qz@?Rc85W&AL|0v71E|Wgd@eLUqU7{& z*cvB+A-E<8v{CE{^nf*XWxDg4_%zdylp;JsSmXcE256(${A3!k6?t9X<;|$3@w#67 zIk4_BRgHX+015z>f3O|O4rVX*9b>ty7ia!ClpbFpQ*Q!3_?8Jl<>VDI@}_So&X?st z`P;!aPwr+?Deh0d+ZTRXKfy6qzRSpsYk#_I4ESHHq%rKJOB*hlfBEdXz#{kYefLLF z-@KS&7WoEmsSnfbgnYB7A-OROqz8P|mbK(M*L(s@w>U1|um^&%_-XwF6A0iYD|9F2 z9WP7ZPBl`h`^IoH)jjyaKg7FULo3vaHQYeZEhIKF(B%aJRi<$=^0EpUAu8&scOv$x zRnHg5DrDv>WP-P;HNDQ`5XVC;(=@=Cn*cR8kGIEN&N3O2$JT@2o6$Cy#|gtDpQT~I z`lHl*EXTaN(zEPul&w-tR4k7LUO}pjffcZD=haTMc9Yaj_n;VDXi3AeY2j7N(%(=t zdz;3w{C@6=Wtm+Rqu!RJ{w?x&75_9VieXjLXg{B$yS#tqBt<8@C99vUu=Nno&#BL} z?U!7|34h6|r>Glc)>rB+Hg8+jHj00a0~Au&1HnSnoMi){Hrz5us9kziRUuY<#zP_T z?y$E)IzG@KtL12r*r8Spv}_68ENU0$SHY@m7q0cBf#mvuIIi;k}=#b>4nVL>% z7J(Sumh&~Bp_YgsK)iysd?NEpagrb7xKsQ@kmAW!!5aSRVb8%N#~|*L?N^rBrh<-X z-J^eB7avSlX0$JhPPIo71^%pdFQQB~Lv#(4&HO|ZvmW*_Nc4gT^b=;a*2JFE`<4X@ z+S7?_cNt5|iY`>;8TB>MyTcxJiN>rAKdW)rsxB75-CIu+23z(H%?f>>)h?@lXpfA> 
zw`6_iS7sP5ql~CPy<1*$w&EC;RXajz)uIm|2unfRme*iK*5*&Mg9rR!AuQWj8$3kP zN`B)S%cZAsK`+44yodJFvTd=`Ibz9W`|@XaPqKvGMwU7|I1sS0`#*rRS+J5y=&|BA z4u}BDSsPZGHVan##u*}STep#B)`k@yL43#B?$drP|DP6;v`CyQ{T${;)4f@?#X`Tkj(x=>i=#?dLEw;{W3 zLpF2cJZ+QD@-M}ukr~C;e4JLu>%~M37vDTaF|pKJj%5MyPXbPmDe_6eyIFDK4XyB? zznXRM>~p#TDu0lxXSgF#E#`#aXw5{m9Ab?);teDmfQYKK?#Pp|#RXd?X+St{w!rh` z@L(NJsz9C+%w5~FT{}Q{bTCjh$=E(JR?#F&>7U$@A zNivuS;se1EWCJ279)N8HZ+y0%6y*E)1&jbgk-uROH0C!94nh2mA<@5KC^&o*24PV^ z{=k60i6LMZdUG%Y42S%VAyAtq4FMzJsNYD?nBV*$5ah2y05BK=!+?JUSr{A&-mH$b z2B<%0{oj+uK;XZTU|{IqVlfEFZyXo|90P2gHH+hS64Y-Apdq!5-1!E0dIChfYH?hr$OIIJ);2L1CL#o`tb~2lJ~22m*-)Z<@>o1}CSlfs{77 z;LaLI{`kK=IsW^zrIGX2Kb|Ua!J))p4^Kfp2olVyOY+w5Aoag^NdGT9dWVBO!ngjB gUJq;I;b|Khu=S@X^yVpGU=SoqkWW?B+cNz?$!itG`PD34Z+>rHMqM?=lkcZGjnDx z&izcSs@H4R>e^QC^Q=}^f`!9_LV!Yof`X!ka^=%2@`i?j3W0}$!i7SD(U*3xceSv0 zHP-ZUv~V$G_q4O6`UwZamQ`>_yGfKgsy*K5@Wjc-CGmq z0vae>gTB^uh=$~FZYybqCWU0swA6I{s%%vxZopslMN*2?m(JTl2XE7jbAE#5lFRQ@ zBnn2sE8pNkJrX^a2*cjtX+{N(Rbcrm9q{J^E!`0bNvK7{=A$_QAU`5t#z3}^5QuTr zx#E`~_1eMjSNbk%IH@bW3&o$>x0({!&_rdUFhH5c`z#K7x1ya3mdyZkp? zhb_^s-ZTl~3g{9i3*{8z8Muf=A{4I#?!Z6S@*Y)+aicvoP1xs)PvU5&L+@4YPUkOV zLnMs-%u`_CVw-{RBmVR@cZiG^+3xD>E?#nM-J9G!_M0!Ug^{-o!SvHeH-jtI0@2`0 zhk;a>pkzdY)At2c9|X}pgK{`aDT9vda@2=dKdEqS)p#I#A`63QyiZJyzIUe7r)(1B zal(t4pLVVcAF*ibY8Q-Zs+dOyRMh7LtV#h^7bX z^{eQvn>|!P&q-;VR)*Cy^XDEVO@IRyQi=}I#UlijPo~rH2VKmpma2RQZK-b`NwhS+ z30l{fWjhOf@G>^H?Y{m5?!tZbWmC-@1bpDZdH6UXH$azr8=%*~b2*ab)yt3kr+n~Y zC@7k;WF3;d{#PeK^W_|GhlPUTMTUaHguEF~TMiEgXB#sI2b+I6twPJtAzuK`&)E7M zc021r1&WtA6sj$8**@8Fd98R5TB8ES(ymxRb1Y!P2FY5OxUw#*5)fcSnd9Skws$nr zS@nZ(R3LBc6R%uY*(QCypmjF=(SlalV_6S8&{=Z|L2VXx|0wmay362{(Pv%#sRByk z4+D_t6X}uAgrWhB*iSZllj7V8S(u^|g#)%fj0bf6KdW3vI7K>S3qJdVBp3!sMhBS- z6fHIN$NrThs!D)*YNutRoIEL0hurTf()?qX)QdR-n;&WAtR6xK`9*5Y)){oY z7+BBIM|u27#^9{o^pL4Nb3VDKt+Kj#YbBtF-%M+%udM&n>(ne{#^3%h6xsdwiTuxx zvx{)}CcAvLQj>)%6&Cf|Lxph<M27=bdt?}V<(^VI(rQdkw}lJ!!SLQo{I0n6#Ntt8vOShPQ#=%)tY7`i5W!SJ>eR&w1`ADCU3iWfS9r`+*CN0wWzr2gu{@? 
zk|Xc$)bpNy=I(`$k@Il~2OwWTG+eNs+7cmgJ}p=(zxkMLyZe-A3M<3Ft1-q!UGaHW zFoU$XNtkcPYrDw7yzse?p**|VqnHavzXYj{$n{#i>Cox6;mZx`K=@OF$JWP`I5M?& z@Rq2j@fK?AOgJj~K{J0j7}c&iY3_-uW7g9hx37NW)M{k!q+09hz%lgg&-Xh__7&Bl zC#s}FafloLuZ|8v{BZ&Y2?62|XD5L|f`&NyeE&>ig}U+W>@yx)bc{xa528DI(ie0z4`o zXemjo@XZxaf-2lCzS_W__l=CvM09^svdyA{hlO7}SkP(H0VeJgn`DSZl$Zs>{8@D< zL|ax5r*;xu5-80)PxyLhvy?AprsAAbggHIIOC>&o7Y+y%|Ithpl@~w$Zqrr<yN zfNn^uF|O>_@*Ffi_aMQ>6n5ukAS0SL^Ty)9=tMo2!~5^S`o)9)y$#KgW{FTNK|$Sm z!$T24jN`v23zu&e7OpNF|EakClQnc#>{n#5{5U29s26NBp80u9_1NacHQb$tZ54^HfS-?my=E2@85U;kQOv^`x;DXH^L z)ww5qj^A+5uC;44)H|L2S=HO`k6rn?wLL&No^!OHCyF$s+pXS6`aJ12z&&7HAIv!8 zZb&HuFTS*(lJ0)7vA=E|A1m+inp@{6vVflTag4>sM8HN_;OJFsqE{wLTz1hUst2~wo@Orp1apK!+)DrHAA6j<|XzUkq5~ouN*Xfax*I}fjqlf;v?#i zltS1ZaGc_Lt^e&A=+kG96JOoL-eur){#2|r>d?dzFcSQos|o%h-p{FXMACG07})Fo z)wr(yn1eOa_F%icc{0^Nq>-fQ*UgzW-iKqj_OK;w9m)XgXM2_?v#-VhL28|IZeHNR z&4zt`-DYxj`#mRBP?GAbh`_Ie2^I zso^x9YFI;6MVuUJI{UP;76U&_pTE6w~&>RFfQa?%~zbP3v=RLfu97~bc;uopLn3##qt*#n#f?ND#cBPDp4}2 zER<)?4`PMU{cof4=5Q`E^y%rP<1rz!7pm4tv$k>bF}<>!+;;J^vL*#hazL8ghI}`y z3(mM)_i*24+&>jL89lT$h~K}o=;5R3c9VBpi&)@pa+4|BU;fa4F+Y9IKIS&qM6(_g z5D590o`vK!G}Dg9m{O_;ddddmgzJ&ue)R)TLYF!S{IK1$;p2>6EbgHp+5nPqX{{_2 zm74Zyel;$4DmjI=^nT`%^W&Av^9~DEU^T`a$vG8N{YC95!(VSApb+srvz)NkQF!0k zVeV!&>Ej-?-J`9YpmnrF(Z^O57Z40|D;5T)tQ<^P0H@D7oTe+BWg>#6F`Nrca&hVs zFM3H4Qo+#Zk+cgfRIawiKE0SdwnjKZnQaO4vD`8o*9}14LY9-`A)$#L?mDR0EWB(n zIw)06Udzk)v<7!Cy2KZ$e=+qF*5^m87?GJC8BJce;+0L!(S}GlAnpPOQdrcV4od-4 zs$AWlYpBZBgTE5%s;ruRf2yIrDaMA=ggGtxN+hY=C|4trmmeZ#?0$|iU2o1|Fd!sl{&ZA{%7744<_E?82*=#-cJ&^0P(DJyE){@$W2#wVo*Xw zBt1)xkF(l0e#O}jRz?_gpMF-_UQet}HV|8q!02M+S29tnQGGc({gfek{ZRJ_^DYHR*#{0})`)z`` z5nYCoMZJ}nc3;=#Vyq2N>gZjP`3aY#_}~ooROVBuS<-kIe%2N(e*(`+>uDS- z*Dn559@Y#{v{5&mu{esY1}cg(Uq)fN$l5sWR2jO!_XckU#ncT+Wr`8d6MF%P44#sixnWLdzfwP_|LnI>+B_T;&lw zQ9>WKCNHBKdMqQXtdxlcCEfPZ<76QauG$l)a#8=hol0%XvQBJ?L7D=80Qtnt;|9)s zjQWuCzEPPoR3#tZF91_-Lx^h5h4mSyx{5>fy}UHwKU)e&B6xrQk#!umD*!4qF8qwi 
znRVtgCZhw(KE!QLeDCvO!-MemHxzgDVgJc-wbu~F+Gw1h#Uo2O9w;tRgy#zPaN79o zF+Sk^K0qbld12%2DfRvF{QbW8{qePV&LQCG;Qe(a;8o-8_`O1PzU%dn@*VUp@_4HD z{xlHq@+uzwzW%!55b!#(;c;N^&hYLON%mSy_PWvK|2&fOeqSB%c)f8OV=DIbbtUJv zn+Q|+Ixye8-(Adc730MuIa#MQg1;jMvFhirn`WzNa-+^8v4b}$=BiBcbIDaVvz*Z$ z*ZK_;g=ZWy$LBpau^JnmhLKtwFYttbUGfVBrtc`?_%gBab)bvO3~sFjAexv!0T|~s^enJ zhQRV~CGc!9yS@i5JLd9{wV;O=pzGvKV9rLs@@{v>3VcIdk~Ij86H^DHFIyJfH4-lW zQhnNDzBx-Iu`7rZaF!#hw?y;vEwCgjC6=@A=d$Ua9Fha}CuTV9TK=2r_M23)CUX57Gb z`(D}9#dM+#d6*DnjIXPem-jA()Cb&X3t10b`nTXNfV8nGKlX{ywV<-8A~NIYeg5`c ze5C!CN|lAsSkm$BW7p87t#^m8VbMZcml=fz-hh~>m3RQf?Vvx5=pnW+r3i|sUHQR@ zRfsss5n+M9!jVq8_uuG+0)GwlcZH+o0{_&~a139yBTV>PIY7XN+XH3Mn-rAANOZE;uiS;^pg&2~{!S^w@s=HenjD zn!j7hrvsY6+1u&O@N%TvpLmf&dOvkXfT{Yy!lVoBB$fJl#7SNh_G(7#90MN}-8L_f zji-PB<3xjC@#9rH49lPI>lewe-t3bJSSdJg!D{^W6WEP?Mkpofy5D zYZOJAm^vzgRCKiF8&rDt>JU`w?Tu^Ov;cO^Xlz=@`h>OPHLsceO+{oTW7f-0bhK0X zk(tSU8;}2YOqDM-LgGyf1;C(LZ0n0g&PB0ZR}xZ0jn)1BpyU3&V?~=6?We_hAD3Mb zmuuF8tX{B=^LmQQIv**YT2D<~-;XZBM*<_BiS@0%SI&;mYXg-~E^}0{+pUR6J|dV{ zG^Rk61IE5jL;r4;Ap4{Dpx4^fZ%N3dQRF-5CCf45#qbPw=6U&7hu|wD-n^~oy;;Nl zQIMYv&UUe#hD=fIn=q0VlW5;@1481UVVm-!?zAb?JMT%Oyx zgtyCo@vzlxUhlTuBzYV+CNcbD7?Tm(KHj_Q5 zz6GG|w5{&6-K|RYks-S;JPMxy19G19QoC&H?)vX)!WQDYU_egWacb@ZU0;FRNH+_G z*3FbW5Fg?=SUN>uVjDL^ai5GO-DP)F&@%m+l^t38MF?`%84t`>_bx1>*t8{l#H;I~_s?E7G1bkP=GTDsxNQCC?ySS`fQt7ioJeu6mz3<5bBsGwzfHE zv_3oQvWsJS*_Q2IRl0{(t@44tWe&_4-oAdWcGZ9AIpceLRTm*(yIpMh`e9&Q(+Anw&=JNVJ)Hf$3k+Yf`zkN18+7d6t| z_1nsvwxodf)4_^^QMq}(o^$HZSnoKmrri5lHRI0khWa^|(Pf@eW4u`og;&LM7t0*m zUIyd>f9WG})c*=Q0|y8NZxToG@V5+rfbrQK4H!TG-EDKwzo_%q&$exb8+v-#7R3x4 zILilYr(uv&gZgqVCxf|O-=2dX@0oKutGA}w5V*p+X660qzM|Oj==isB${x3@fnlC)E`sVGnYtm83=&GQx%g(tiR!aTQ_m4)zhA@eV|V$~H|Rp|)OO6T0~INTmdo z`U08x&3)tJ#Wr(adA_}9cASmza5xl3y ztLuSKBhynaP}*Du*(}v*Q4x_JH4<_x)7cBn|TL+yNsK{NQ3w_CltPt@` zPTniRAzME9tT72GaIJCgSW2P4X2B-ci2tjU-K>PA@>)soFGF)!4c!xc#$>&I#Zs#$ zKuscDCp;5xWHKQ2rqF~8)dv-jv#NdcqiPoF{LA=p^IUfm5pxsKB0;cahBUIBi9Q;> 
zKALT`q{fIOlM0VbTZ^KbkY?t`8V!Uo4#c@cyAx*SavhxEZeoiICW~7(%c;Pm;V%lz zGz`g84EXaTOJZO9#AsHmZtfHsX2s)nc!CAvqG_t12AndDnxFID-{0ICnaIy7@b;$-nb5wcOcNB$3TUI$A5`HLDH2R#D7MN7 zFa6NBs$Uz*9^0p}U#Ddn>%upiO+w2Ylbe;7`_b6!9h%1t>MW5r2HhkliP0b0fI6uU zbMO!KL7gts3_Oi342Osy{|b5W#GC7&8&riZ(2c&#Q&xd^=FOC@>i{9L9Up$-m;`{- zE#paXpZxM54#=uREUswg2;Sq-shN9he$ba|1i?0fh+Zrd#v;jakL+hfFy zajA$Ie&(XjW>h)i9&ktosVIqoU5|F5;*`P;0nH=~jTB{n-1b<2v2xkfq zYBg{)XoNyo=F%}jTDY5YNv60A4#$-qZMS`!w2p688s~h&(yY7J%8tqUw0=DhFpZmK zx^Tt^>hmV=w19|Y6;YjyVp5Engb;*4ERm*P8W<0H<8+k~MFyPdxKx~MP+dCB=dI%Q z&1uZC_T3qLGQja6zr{PPB-B3g@Ka(-pQbwNY;2&hG?$sIV-pE+`Rm{;LBBX=Qy%#uINPpHqJQ z#M^D0^3EIuD{RFmXk~JMmQfWqhU&dYAH4rF6sA@MVNBNfS@n#ZN1A*EUbt*d5_eh( z>m*ZB15#xa#Z9b4LnEk4N;4s%XNH7{p!T8XWrpeBl`>9Tw?C6*{O zc8ohEZ_XkmhO^fuWJ`0#afr84&uzVP%_Sw)gOa9nOfj}apT`>1WmTnVMt60oQa-;n zI{NS?{gFQ9d|fH;dTP!w5gdt_rDV9(NgE&7z*n_ajP7dJAp8|7rmZ=`#<QbXYk4 z3%y0~wGw_9^g;G>enhFC@#mTR{9#R}jYM+2VKt?rZ>_>h`Fp;!qiowg1GlunDc9J3 z+SnU_!eY__+O_DYGrB_~UPIwZ6#dd_h4-+V0PPzGh4-qT0cuZ}1@B*`0zSMRM7^ON zc7fjXW!|%H#53OP5kLDLu$J2L;ir#D2!W@N;e+j(DsJ)%;8t`)Top|lR!dUJgi?N( za7M?ieJr&h`v_^%MaLM@-G)o|fpC<+ey(L&!XOU7ejGZz)%r}fNf{m0QHOpKleZfa zn8o$&3Hgt;?*Nmq`K0TNS{W>)4yn&AQ5dWw0>EQtxDW)HT%%>UNnvdnTr_yaIDSZaAwYKb2pHxNgn6i2f3d1w7|+f8enxU~k0pv0{G zHZ)h<1dt9f!h5UTgb1~8s6WuOP&)1;)#*G{90mW5$f=H)i{vrgpEXGjafJFZ78Zyl z7j)G1<~Jwgz2WxaL}+V{o!~24YSrT_vToyC%G{~v1h5jk`^wZf74*Y{K;f``hz+|A zuSj7HC3DtRFR(6pvH2lHH>po_J_xp>c1P`*ClcA1b{(=q!MJ9oNSpl;E|NR%;;Zw^ zLEJ_qv#|--?0;;}YeOF4Q7-24N#}4(pZ24Y>v;W*bo*m<+^(V`5f0msTe53{i&79B zU5ro4W+tEjpL}T5+eAU(I*o?E*c=os&3EZbz69}lbBNamik>AcB%#%f$xYh%6|Wr~ zA}q<@|NQBr@JmN!VH9ger?=|=rx8$!KV6)@$uNUeWGuN8#w|~f?Pg60g#3T}FcFTn zg{T@N-2}oxvrMckFQ{@F)CL44v(~vK&r)J6DJpWt$~09?GYCtmOsfgN`hNXfSaV-iP&-%jr$1=H}d-{Ps+#9L4T5a|KT)Qzg+mDI> zpEO!ssjqY~_Nq+&r>@hh@P$Z@K%ww&%yS%pPnQzNJi3BXENe~R3VKy-eK)->i{^}q zt8dLSWEng46@&v$R?#mOj}-^=mEMDwx(diUxqJ0hpg7yO35e!TSaTqne{lBI228)~ zJ)C?8{o#Ie->vp#@$+axY!J7{T3@S+iWS~w4gTi+7b<@X2G-hp@FqAK^gtJFeA>rW 
z5*p$^Cb{Z}C9Wj7>Wt-4NOdL{frznPi`R>bcxiyL9UAPpN42gf+3tcx`Ah|38YE;H z(pk1*xGPS)I&>Euqk?WvBXy`(nf0HY%^+v%eEc~zagD?4X`5#|=0ULa3%@lmm8^Ge zcplA?tEw7dMj)ZQl$SyG$4a>yMU|~s5=m$u&yKCN^dZT_-ZPQ?#$J zf2ZO1%!fv4T6&H8tJjAHxlePwyjQx9Am*!N*!F@Kd9}Qzue6c*yDT?_@_N&CKT9zUi@`#OZS*AtO zFX78IP=kAECLhH8;_Y};)|{0uGW1K8W=$&K8#@>1m3zsM=z(_YbCpBLZ^FGn3MwDU zb=!eE=EmreXWvOG{(9EzMj%TcP=8PcA}QGBi9UJRDPAoTcvS`*uw5-vv(KiH6Yal> zuBwJsPD55O|6U^LLJ&2ucw z-fwXho9F*|LAPXoonN$ka`;eJ;GI>{Hn_r1{745HQeF2J# z*BNUm-+&;b^rz=V3ujAE=g!(0S8PKGaHbdCENUs`a!R_eDy)#+rr(-q2+Iflt0DRZ zAE>d5PwST45-DtRZcPAa*@gt)cIAP{$`2al;|&39p~&H&GRJRy$31xk%w&2@?`bWn zo(%;V|0z)xyx6t0CJGy@@aD`3X|QWne^c4L2*Eg~9n1i9al?^wQid~jIkzjUiKTp;DE@-jY#K2^1)&_El$*4El%L(;4MbNH&tWwFpd6|WU#oZ#Y( zTQsi96r{As3pAI9`R+fa&`3O8H7aA%#^H3H%1(mPcl3_Bq}e_rF96g)ZgT)(hE~Ov zrfusjJpg`HWeLgxKsVGzsVuTB1X4PtALwb_8jyBLBc(pxYIkTBaT``;sa+fBGuv#l zZY@_-9JfSd>@siZRk}S6z>$rh%-q&*Aq9I?{Zn|fL3!hDOG+k^lXA?d@1TZ#Oh#1WTH%Xbyjr#y{S-rpk9TS*sJ*>>tGS2 zN197M;6BHBK|aE z#Z!87zPz-uy!C8Xv%YEL1Xx{Sa!?3$6S5$OYoE}ODZb%gxFJLf+crlo$7kr(XXVkP zDU9JLqa4{zP)-<`Hm#c+3!fK5=Nto)rI>4bW`L1?u;2^=-#&=eTS zyDJ_&6bvs`9t|}1+FU3giNPw=S{)cx>&P3X4BrQ(1h9as?iLnVKA?buD^Xd3oD0f; z0;;KW{}`q>(E}*8Gny=G1tW?klx4t93(S$43Sbs^AGSjaROx((^ul6GW^+pTUc#dP z0&fNCg_CG>ZzJOBQ5KX%@^mI`q@C~L9(9FM&!{GIgow2&B!kJU3j2WZ+r}T3v;6lL z3KOKxpoO0#mf!yM8zXg_=`FSz^qcsUTue%VR}*iS(!Qt*v;BIM60Dfc8`ro&GR!C^y!WCH*i(#; zijs3=9pnDFUVPNh+I#tB&eR+Toe(K&Gc!-yfM#-OVhxy+f%R4Df-<4DWj zU$FxS8^FfMHz8=?CSyPsbpbk*B2BZFtR)_$w#iW2CiM|da_-SYRi5?TpXAjr49DX@ zDsQx54-HgZEf$RXp!!>a0iifyD9G+Gwk8s&WZkBQ<^%PNqy=t9QYZQ^$pgiM)|2<6Nx;o%oq8GAW`cn$B3R!wTJ0OGrV>L{Lg99hgH}R&kUKP3+U!n69+C)$5aL}a z6AL$dXHAxrXxc`CUTUx-`gUu-W`(knBGL9IsUip1{+zPj^E>Y&&He8TxmO82)lkGVR1~ON^bZFP&e8 z9NWd-o7Y^50$v6--d!FX4&&N>AD#z1dsu<#q z1>;#_s5!_MY?!K=0S4z}3vv`XL@}L;l1M)$vX9#gat+j6)U%&NK3+8u494qGjlIUr z%t-WnuM4byUNcI`P~x3Fv4^qVcb{lh>oNaA)Hyu?f2U2XND^559EdhHa|SIzApdd}C<2wHChUX>HEUG$QSFZel}~;n+VQIg zS7PX%+I4UpQsza$Trji;xbLoBemWRhEtUeU#9Pkkl)QAFTbz)1ULOg1zMh&Ykpd;p 
zLJRRZc|SGzuVOgXTd@J%OHN^ghWDf6Q23R6E>469POijmq68h5Zbj8>ga{KOJcw4s z3!sitKsR=#sYXHZjS3^-TOHiQQ#hh`DL6K3x>(^e&Bu80_FhIz{D$q_9QWn>hnIX} zk9S^UucAq2^97yw?Fx?y3ni~%&W!;V0xkH_cHxy9XKza9F(mn1)SvRXexD!bYWX64 zF4Ub0M50T67-h?7t}>7|31WG6ihmp0i3CtJ+Ee+o2qrB1N;SEq!bmQB`If=g=cvYN z3IE>vK|?eaQ$o+fNsU!?Cm8)EHdIK?{=7|neN+qRdy@eFx!@UoY8i=Yf|r>uRgOGa zHW^ZYhC3aB5?)n#sqR+LR|e8MYZEZLt+E=gG)Xd7A?dIE7a#*^NgAt6@UZFhmPc5z z^JT2YShg%itFY^jhX*918{uB1ve=E7qd?aVom))W-N*rsiuv<#4B@^5Y5Si_`HPBb zb@3GY(g6OpB7uAc{+3=bSE^goOpEa4691ti9dG%$JtgC<-&4FJ8-J;rb=%b+^i#U@ zL0wjlh$#UTKi(qua#DAVzq}>yZ7^S*v%YWbSwCI%0bt%kGYIY&a)!d3tQwLQr>~MM z>XKi~TR$wU9w2QDRdyy)RO|&*7)(8<<6)i!CFx=sU9I%9c#j#!Eu;=3UO&X{M6Mt4 zh}nOOb2d1Gv;O+WYTDOPou3KWH%T8sK)yWbD#+yCH*w%h$_~Y?o(zyQOfFMDh~ShW z{oyE;oh#ZF!NYCHSzbB&mmnN%zCTiAfceP16cU@D;877fbA(~(4ecwEX&aUl#LV1q zaAVjbkYzMbQDyFXL$Y-%e)#tHs!9zTa-|KO-^{16Vazph|xI>t`ixZU00qz1ET}n=o(T^l!ywRIkU(l@g1rvslx@mwuRP zyO=)YUr9J^%61jDIq&$J;hI1KEIENatK(vQ zip187)%#adHwgzzcI6VQQ$$}=DF)f#A1``od&|BZEDN%#G=!sl&P2GPc-o2!vI;ce zt6|vUy0!b$jgOwquJZ2w$4%ABGzVN4C#jr{9`1_`qLf$17TOuHY@|^(UG}tqnS+PZ<$S6 z)Rn3KJjQ4|s^9rLlQ#PdKbJGp5$f#{C3Ha*6SJq7l|0(WztABa9tUO@qsd!mQ!6>m z*$U9|;Z@x@W%Ymh*r0jP4SEXtZc!QtGN(5`3^J*s<)t2p%E+Z#lANq7rvEksvg>$&7`Kw~n2RAA*3s;~%k1H-HnDph73+MCy6uYRaf^!r zR$LvOt-vIQDXBcUm$#&MP*Z4ng$elm9zO9|#K-%$MiPps?{Lf^;XIXx6{_#AVga5p zy)i2`bN@_>k0BJpKEs%=HpVYMY5j+`bqp`Tpoa<216%ZlTOXz3c0AoB13|!9RM(NQ zS{CwGa948ghVDx~%UdXF+_Jp_wR&SJCga<%N4GVCaer0XMe+Jl$!|9+zeY>pft>48 zXW{LzZsy zS68Z0J!FZrWEd>VQ+P^48Sm|AbE^j~ED5j5b4TOR2)c?-)d ziMt1&m#hj3W@OHgq+lYe3G$rYOx0U$MJ5Et%`7GiA!ZJ2En6+%8a?VRrDTQ7nA zk&nivTW161_imesaF}W`DCK6Xf0x8YcuzeU6vFY_uExBk67P3bZ!9N4D08*=mQs=c zTlv!+>{M|+O{b-h-B$I1zf_2&&9q@6zo_QbDq(u?xlDHycaTZ%Pgrb4-XANTJ(c;+ zfTSJ=ctIoH8GrT&B&>cU6)pp)9Nhz2@MHERVq$#)yMLkb29b4Q$%82b+S%ujD%!CA zb2+ySoP|n=_(aS22X;Wr#0UJi4iwvMzR4VL!nZm-NK2x)y6$!H)?e`T@iM!%dR4i> z?3jde`i!#uALu2w+3hnHKUwbt0PEkboZ{2>A)ibO!k|(?3MEMqC#;D=DkFgi9Z_?m zzmM7|+gZU6gTY7>N?neo_txJzpA3XMQDv-#nQOE6i3~|sRaVA#5FMHuB}^7WB}~j_ 
zQu%9a|I%_L*x19G-YE{?SuV`niE5;EX!;6FyMBVc1p(_zKZi0&>+W#lf0-=n2}b2b zGhQYMLk$MLSU(h}jFk0+`)Nr!e@vtDDhz`kJD3?K%h0i9`!-&NxQe$ii^i|3)nc7j z;g}?M-B9rvUw1NJFqINRKLQTp`kPTyL7`Om{&gY!2y(biJ3E=GsIq`aY>2V2N*T-s zzE+?E0RN$?1SO=N*?F;ta|6Mw-sS%L@dwyHZU_7M9gb_Q&%fF7SPVQmjZsJv|ICLI zBs9i6drYEKqyqVd46mYBqv}HzTP+jYM2sfkP%Kfsq5?ujPzDWAQRJ=s;FlUiSUmX{ zw4g5^zZh#th*Y)?#$OZ!sPLBed`B%q6d~5)61}N@%Fr5-y2#c0f-+hd>x^# zN#IQn6^TO%xe@*29Y8^!C+<ml~mb>PK7kPnK@<)un9-VYm+8OD-b(bZ-a8Haw(;WBI@lB+zay&%e}Q@w z?F>e>BF31P7Gv^_?+WSH4GR{Ghz4M#lJ#m)eDd8QKy>;e`N9hCG$OeQfqV|ACKp!{No#rT??^eM(DZ$Pv@B@xa6&*v?n{x zQAuXafT8TdsB6S7gOY7)UUQ3AuBA|RjR#h*r?RxiAkNW-_a&u)G*zgF ztrTO-)qP5k!^Qw13`_>O2bUjR-tN_0s9OcHyv#9)X0^TIMv;Y)s4C=-Ev>xOtC&c) zDP$_Kfz}E+)n^e(_||7J!Mr9fr6-IwJho=0M?H$nl2By;fJvG}#`htdt)z*19>-n< zzse;9Rc#Q>x@%qe263U4ZMtRl_vQWIoMW1P%DfW9pv!9@H<%5giN=Vv_u7+iNj(vZ zt0bEI%EjWh`(tt^4Hz^$9-Pn*N(C05qSq6&!i#$i^CPuB|NvdO*Htk#qy1CusIA zk15Zqn;`I?^PPS`w8~GyWjA9{^94UApB3`|?D0{W?+N=k5M2JJ7?YN^;ez%5+2NOk zlbvge{IiT*Nw_Z$W-?oAy3=FHL(n~K-WfYc1GPf_B zJNLBB9`HGC1TnYodo@p&^t4r=w876k%rm5oT-%o#Zh%O_x(J4!N>jbpAyC~p2#g?5 zQ4O&O2&h>W2@dgK8PfFh5F5LOAfNVE?clP0HBIWSR{S+uoPv6PN`m!0J@L&NA_XPq zIFEysR(!*>MYZ#7h<4HF+re`~QT{NauX>*RhoQEL06_z>_~-?dJ^3rQ`NB*oVjK%?}>P=Q*Df4Y#)JO zf}#*46s7Bl#Ar$`^G+*hEe5x4_tBrB2M$w7cg1TE1k-#TE!o5IjrDi`5 z6!^4a&vESSKbFrrY6SSMHJL6Z+Ll%j8H*033`fx(D!AuWWSj4WcXvtDwbjE8jfcN9 zc;hd-7#!=Sm>)>z%?!{_p^^^)un1c^&f}>6;X(gwB}{TZBe-QsnttW=zoN*VWJt#U zE{+I-Dn0*uItpGVWI-7k2X&^(lA;V9S;v`k(^N@QtlHwvQSxdn&Qz}!-NjRm2Hq;4 z+HZFFr{aPN#TaP8DZHJp$VS(+`qrA%y4s;@QaVEa+qf{hur@}>etJpIpPTkLAJN>NfZ z6%ng@I$U3g2lyI(;jhTZ?6Rp$l|yy(PL&AB?bzZ5%=qM@sv}fcnnj#IkivPkjNX* z_N}VHDELQ;{~pV>Q{;mexp7FuZSj=YIjMVIDSt^A`Bx)jSp|EA&_)tAZVy$Y@o&{RRj zf#9bl!y)UoDh%H&E4GcgXZWVIEb>KnF+BR^2E}qK-KH!|g}qJdnY(zC-JM={)Q?O? 
ze|7!(lrkH2+Ivp@iqYYGA%x;zYQ(x^vkeHN=!FGWrzf~y+ya3VOX;%(A`Spp90s`6 z-h+X*h4c_k2~*UylVgW)!>GiqGvM#-Zxg9X?~MF;%UZ*?cYl9(Z)e|zI3@a^%4;tI zs(@K@Q`z`R6?gH7$_4B_6Q20W=!lRvJspE%uZtT^E!- zaIv|-bc?>)8;?@Xm);_@@5vp`%eL}|oBS&6Vn!p2-xz`Y z6+~JnSfr__>?TQ1Gik+9#Q3PT5f7PcA5tnPS!3vz8jpnSC}Jj|5A3@Ap63B^h7-C% zl)MmCmdjEa_fo}ANue4UD(Sjf07eO$}3p!Y3 zb13ebO5@mNkcc{G$u1a2Sr^DJI#gV4TD%O<-O@4sGjJJR zLKOCIiAej!j^^TWY1Q~4V$PnQ}`2bXc~Rk9K9hCcysb+8Gyw>h2zQ=UrLn8`T0QM=}_XTc9wkOpDL% zos-1~PiG*bzkgGLS{UK~kycxGRg?o%RS<4c)Eu~N#T22Bv0a7ND~fp8Eh&-`X4+LNl$?L71_I*B$po+Hi<85RlZ29KyA22~D&u zM`U~K;5xR9Uso9&dEV1jxtq1Sp2HjEd?E@zTqR_hXGqL86}o5p**;ApN>k|tmp!sc z{oPVU-iA8#(!qYHhh};?2*!6&2Mcaq$OB2aET}u#85yRyKd)JduB@iqkUWI{*#aMq zz0^$gFScjrERADnMU7+0))(zvKzw%viazB3G|XMCD#MCk40GPp6)XkkJiZmpNwqBE zKwW4r7s_gsupvK%ol;OkVxh8vQ zkm?H%lA41XjmOwZn=V&QA!9u*6oE=*o0#$4sAUv^eZRZDcemC5H!|KbERH4W7CsOl zNN{&|cZc9^0S0$>cY?dSyAJLH1b26r;BLVk^36H-zW1{q^;GvvKiyqbt7@;k_U`&O zfjM5tNE4SL>$lb?zm$~Ib)VJ|zmzx4_RoePgG+4(%Dm_w0_g-Z>fCuYamSL#58GjX z*<-1@MQ+pf+wX2dgj{BB1XA|A7>4c!Zv;H{y>>nI9})%*Cm9;pw*K?Qhh&4s=**gy z>15%k&aIG=k4@Uy&3HXOn+%YcDmY#6I5lqq@M4Y|JOOuaNT);d^?7TjG!wlwwvTSi zH+u5*8rvI+dhQ#Cs zdO~Y5H!!O+t*HY7w5D)?Z(ZcN)@q6Uuf(Z%+DIE4fCwJ;c`e^Ati}v#VAFUnm zs4g#wRquGP67#p|aCuDll%dYj`M{Ld~t4 z8ok0@s(ISxws5`dcBenos7qv*(SFpZ^(0u&Xg5k%c{shr9-Dj^$-}BExrD4Q{>n#{ zRWoXUUg8@5C9y8til)S9<%0#OY%?&xAdbeJeMzisHbtV=?xx z+(DT@fK#piM3jg3s8aNF@ZJ}CA#*c|A!>jx6K%J1O?N8Bw$=@~s&4-9%tL+A&aQ4v zM>rtdz=zfoSk5aTpXEHXeI%pizo@p$Cc}5V2a^XsB}2R?qqbZ587*P9!r}(sH&k7H zgRPhC3(@ z;1yVXQC2GySL-1ldvE=Bi~gC03OL}MTw!E1+kvn1n6X%l-9>Kmp=>nk#T`XrEWA5RasA70nZOm3nkUU3rEjPy1G$rXjk1=XHB&((I;-R&;bELFiY(O zDGx}y<)K+XD@8`0@m+;s%gT4*4pV6z9IT5Y=>Qd0y$f~g>!pT&OqFKZyD1XS@3O)2 z0WhEl*@2Cu^1tZ~ z!C)1UGeUZ5*1s{++anTD$}g~m(<8q2(n2N0<-DQ_F>SBY#Pde~iy)(0SK^sJ-I5Xl zLZ5WCUMRekcLGCz-V7!;(>JUGk$O>av9~@eNGAS-hxEmzb}Vzxx1~OwOv^ ze2137+?v;;ThCvv%+bE$(vYDFEEzD-_#S?6*pJzsPC7C1N`f0|qn&qUdS11D)R>8S6|z+Im*=_2RbE@+*gQW~S4Ul(@N%n^8i*nsn@{sb#lRxG 
zQO^BnU-z9+s7^*csh3M2z~-VTe@Wt|80ati*597W276p4bTHL%xr%xm{f)h-sU>7UpEkvImywDJKg7d5F|2T5O{L5ob3;j@5cx;FV zV~Lz|vX{g+EC1DJN=1B&kx!Hr{y)(9j~P4J|Hq8uIbii8!O%IL9>H35=(UP-VJAm> zvun@E{?D;;0b9~#_7?g&!s&6@I?d0_6wIqTzjzEP`i7>U<4xIO_~Rc+*wYFV<8_4Q z>TNcK{|hq+@_#c^9;y~=``g!1%%$iFj$Hiu+wV&ATY;Ckve27B1W zme~?GU1tI3)(6nF=)@!yfpTu?0On!`_gN8b>+a;nPJ-+th>yE%Ez z8agJ+Pyv4ohK-AiI!|j6UCu=1s8OKHesttI$~nMT4txe40@QRN9RBzKS0yC%%k`S?6-&Q3DuAj20hf1d;*eH%J%R4m+f2PsYC^*|Glx{)wY5uY19`njO zQY4N4XFgW2sQPvZiK+|#0RndgVeKif0T?^osk7=pMJkAHcBW zz5TM>XO5y4-ezUP*3wPURF#nAcxNquyeqp=IBy$6rEkDVAjRuw<2z+@_2&?N)ct+T z|NA_lXFK@Qc2H~`y&E;Y0ad9~QeZ@~(-QUcx2#gNpALG-Y9h|k-(jtrX^x&*Vy4XUwi8+jhr2v^UZO?sA)up zwv)}pWuS`=XWk6s!?leuz2_mvPk~n2q97v^uE=Rb+oYGx!R27_V~};V3q2@mIu__? z#ncnEaxT8eyn4`@MfL9$<84vXOEAgPv5bNE+4p=n!*}haX|BhTe651xd>S(H3poG1 z-n88rql|9)$wmOxD=3KSm8&4KTjBEY3}J4;n*6!;^nnj`sh+Vx<;Y=!yxJ!9%Wb@h zu^$^2gNa^M{Isjxg%(Sc-M0}73L!byOG^T?Pp%?TNM}i6CWfH8BoEGW!9k(r21L8t z36k_X&(G?WE0FXEjF0#TKv`nM9K4;w%A8S&*G2^V6BkGw$%w9ytf8rCXovcIHK}H& z{CwB5L$MiCKf$fKn>MK;8{gwyoZqrKCi0c@kxbxs3APWBl$V2Zs-pOJ&s>rWwdt)9 zlo?_q*dhaZ6SCKqeyF*+^_Aup^^wSX8|TzG0QpB&A(h6dxZ1S?!LfLc^UDRu+Uuwh+{_BH=-Ey7w!F`p%ZLqdvk9A^a&IcVK zM&{UBgeO2$AU*;c^@fd1-zrhPjJBwU`h$vWiz(uf^6}|tP;zrbQXmEI1_%E}&3e$q zc2Mr(O@k3n4#>Tz9rh?_C!P@*8&RbW=iCyPQ8fk0*DXsOC|J`M6honMhlH$gY8d0s z^kyX@bY<{2(A_N;2jR8-kBC0#H^dT9r{2!@aJZ7)(JNA8CmZ2nmo66y^FC|zMv-^H z5G6)sU!4!`%)&}{CD+ZJ4vQm^kz8&Wm9i2*9;q#+u-Y6(?g9#<9D%c*rux&m441vk zn;q9*rnzcHX+4j|Vk@w0!xBSb{mK=Z=E`hAY3*CLQm~M5Zo@L^>!`QlyNdU%NfgQz zuh@QP@C0jh@smZcJsM=cGe89tELg8t#iQG!mH{}5&LFmk2Z}HLzVu0O>0WC2)scX7vZML!ZXgu4k%Z zP!vZ3g>4w#H2Yf6a!8hLY4Salz+MTr&z{r|F8$JEl3p1_6z3k?y7@?0{%g0G`rvI> z6ZG176{xJJAsM2*vetkg!{w*fWVAb6s<{oUHHjvefv#z^mojx;Eif8U2*`~skqMr}jvrJI^EZjfp5^PKv6q_wz!M|U#% z%EiuE5M;fNKmsTrCf++b#uLBgQg(^hfQOp2fC3cYPYo|m!hEe+Gu#w647LzX?{tSQ zQZW6nVH+&S5_@<{7rs}Ce${GJFR>?C#xun)t`%YMA=Zl3JP*+%bE6;B4%WoNWWal> zm;P;8Wi+=#sXfq=iJQoYW3B@k&t)akU+)%cbbfU#ea1Cl$pwftdpQ=Sid?jdZ(OYL 
zWKaAB;r4|uRz>0rW>Mj48m$6sKYqbk$JbK0(Sz*y&SUg1tH~?y9b(mWD6?6AXk@Os zue@ejutet`Bm0jbjSidty)GAfNU)xSS@Q$I5Dj}d=f2DhjG9Vkjcp8F6t|no=O{UkR83=t+RiNZP^ z9sO2Pjbj=Utf$(uQZ&59SQNJiqJgFO4V7TdW)2uj1p8ntQH@4pM)cFT{2*xGvDX4U5!XPu!Z65P5MD3}! zN=LPZz)x$bwatMWzE2FhvmkB_Dmuxd7-~G`dai6Sz0AT1(Hk&hE@Q zYGCM5(=yDB_`6`)*`tpUWkGfJkFn2{+@%*y%P}kH*lIQJcr_23k?dhJEQz0dvjn|w z8+Vg8MJa~Pi$NHTO||;OHR`)^l3|;A(#{>Wth0tquT>31L8IM25pNP-zALSRsw^+e z-V2M!a&+o6X%YCGP5+YXis`W959ZK3Ps_6>A`tl*acOeDg#1ILh<6>eTbOyU%x}Q4 zQ@s3z-iw*)N|jG1+hA)6D63kmbBq z(xS4N&P<$8e#vg06;32)B~B!CGotBuE4ef>E_R!HZ)RA(_Gl!sY&h>uR@I{@kS)o^ z=)vVnmb7zGc~3*Z!oud*LRUdG!<)C$QJIg3Pwos3qJDx!cIrGs-+%<@b%LSqK?VNW zY2RVK*l^x-1uJ8)X^J-aJy*c-r@+Q0QLf(cK-Y;dW5D-jWo_b2y<>KQd`3_6R1#@3 zbqs0Fj%kY+n5%|iqzmj=z0;PO=-y9h+LFZTnhoYA5L00vg!LxeS52#IEmbLwlE}9+ zgSr+J1qBBu_gp247p!Sp_uLuJjGZ%0$#g!ei!)GU75auAaq-f2*w}hV`<_~Uzn%g(A}M@aet#M;I`eiK+y!X6%eYw%;`;e@ z{^jP{GqxJ^>wY?@V~Wv}e5Pk)``qCR_fWi{>fb*4xznw@HT3^CcD92y%Ml49Jz!o` z9vg99H82}(ZEuE-+b+pTp2yXG_?fb=*X-h^Ee6f%_1ez4iYm-h7Y7RRF{vX-(&`vo zg@5E#>zT#XZ4xCk&Aw-W9d((msL=-o>tbPEWEBKCVPU=->wLsQSH+v^3d`-&$@1LS zPx4xTUNLiuB6M1aC#WwUux)QgI4Nz>Ub2fMU+`5_xk4BGO{aO+vzo^8pJ-jCFX!?{ zj>L}#KbdikE|qYoz>oOwEA;Sirm!8rr|Ji#-r&*wZBY-Y0W|RkG^G)|?{-GQZU)dn zwg%Bbau-DPDqJ7~vj@Y+ZqLxHk!v}QbwwqfP5Y``$@=jB)GZA+>?}!H1huNOPlTF! zxs5FoiCY!%nq>U#rAVG~(%l`9-WnW(O^AC}*QDqWi80;XP^N|@`Hdq2RqxKC!1cAm zw6Q&iE_sSpt{+hnj;_s09Xo;tq#oi3hgm_4U~D#x#?yM8gs$c}CWAcS)W{MCj2K{u z*+-PjtO?&9K$J|(E{c;gS-lhLW*b8AQ_fI#h9gz@NWzaPLM%$7hb>BDmmI{8Y0iqA zBujjVE%>wY86uEryKGI7$bY72Uam|EVW_M6t3+wKw=cC8} z^TXlq>+!aKE_t(i<_Yj{Et^|MPI;rj^+LZU>7Ov%j!o>$AI!zJjL%Uq{ z=V(E0>g+W6dMFme<#T!dboV?Sg*p1h-clRtB_NvT{PC%CTWTUfh?Cv|D^| z1!J!K%mMXAQ0g8VSwcLqqDF^$9&vI{eg1MT(shFpXt)4{`OPF#TeM4FxsT2-{^Mq! 
zOtnQ$y38E-iQ%e+_q(z|p7DWYb(pQi^xP!~-d({PC+6SlKctm~I;(yIXVOSzU-&M9CH4zM9jC}YF2k^7^7xQ5lQ3$iW&c{n zehxxwX6x@Pd4;k6^%K&}8lHu9aSaLK%wn2t=t_1$KWZb39c(8&pNK*&!MOJ`i#{(# zCV79eDYm7c5faU0e{)CzmCsnJ*c=RM8735J87=Zas+i}?X>z6qWs!O+&uzR`--ci;z831|O0aTWXaZ<;NM`%|L!3^?J)9Y{;<;xwj`3?>U|m zj^ZGU2(=(YkRsk3y7Z5x&4}cIfl%~nQ%b=%M9PjjMw{YD5>gx9d2NBrGYyKuG|bjZ z5$yPWc9??jA$lkgbgFlb^s zs9J$K(}2p{fLdoo{?1D4(17~KihNJa;oP$78t{4`u2HKJTjtGf52M!}?LMyN;Nuzu z@7);2R2byl$;T)5058MQ3?N6$EqoQ5k`Nmqf?1;i1qDHe6j%IF;6 zxv}91BefcMBoGy1be?16x73qUf~f-;&SeFcv#C^Ug8rAXu}y@h;$&u8Q6M9yBsI22 z2}2w!oLk=12@Ku_06V0cswk!w0hoC;)HIgy7L`@=3tUt41iLXrZ_Ioc9or;N_+X|o z{TX_0ogo%_`KM{}l)9)SIZyC@a{YcsMz!-a`_N9_H=XA65xC&D%U0}q@m?K4bIRj> zt%nYk5W9^OPa>;rz>u{EhHweJdt}n|R)}Y&N)Wu_ErtUe=kWn|j|W~PvC<6}6kr~# z9Y}!*e5Q&qeJ45nDHLb>B+dCQdA^0+E`gmxKYCnd)8$KEv&CHO4O{W(6l2al)GmVm zY5a`hdnwK7_-aJbv?mTTdl2G85JlqDt7HP*t)Ez#XA9og(V~Or2NKaZ70^$O<^7O{ zELt%RNQ)dV@Fqcju4AYCpO7G(%)$ir0=0BR>fz{gX_?IxReH218K|>A!|=9N+^H>F z$nsVYiB000(^9)@BDhY8Ug&K+UPb%!N&tx!)ltEoHR z+ahDzhGtaHyPn&kW830mJ*9p6x%XJP_9_-5j#nenS0gkSjc;F5c^rT(oveT4$QvhO z$dV>eTNOS2L>d%##A!OKx%#A-sLuKt7N{GPbApA02Rk^-6qBFX>t`$a2GpP+H}J+A zZId*oMhUj_10+P}8%Z@JnCBbwZSG~bjL7;5!~cUYhZZM<%9D+#nj(jm_0~f}64bWL zhOM7tF^(ajO5`T--7mbySA7f&zBuu=0SeC)_#Wss!FO1p2sTsp03J4vq~w- z<#mU&Zzt)fQ6gf&`b7jLE^o^SLv$EU^?SDEQ#$&K`ojl)fURhCRj(JXePAD zi3qP!2?`}0_RU%osTcg8v!=MM&o7}k9;i1$WI{Wwqa(f^5%H)uv}8h3gXs~^l@f}c zd_6B=+5pcp94FNQvO_>LwWdyHvXp*Gph~ASFZ4r^E%s#!+%g&+&k$sD2>g$Re-xAl zoZ{H}DwiySI7JY#?MI<(E{s@g<_8S%5oW<~!bX_=f$TAy_zz>D9|g!SZCD-3^27+W z084L89^3&D5V8>WzYOhBfA*iuV#O|z@4p3 z6si-*H`2}cY21`B86&ZmEbai}>L85sE5q(IsoBF$irR`9fX(L+`RC6tESpp1PkC?l zf~c+PRZ4I6VhiU7WpQXhuf67wtOfRVZIA^e}&W%oU}y( zM4$;rq0dT}VN(cau`byMesbR1I@_t0Lmm>T;m<{D!clMnn9s;uATmwYdSkEM#zdM! 
z0)L7Yp_bK%Rt@2?3_nwnyY8^Nu2tfTpMQlV%nW&5UksRuW%C_eDf2kg7l> zH893EQyC7^)f8G=6#8NK$!OF}^m1i*WF~@U$r|D1fUU54=7W`zOjd`J9=WearaU;A zK~_~QeWR|Id*hU36RIUSLENfhL3j}m{hH~-haxXt!4_mLK@TL8Rb4qcoTeR?^JM@) zk5vOjec9#0u%FD!-oDcuj|#%Wa^4&?-f}}+rM#L4>>eT^O_nfga#|rqaM>VcxEppr zOkeeaV6LVaDzvEcB*$N&&2yW(+u2tQ->Prp6qENDru5)i8l~9XsI>K48ev$e zC*(C-U#PG|H9Z{67tCh_DyPvps)G&TB;(f`>?YSg655=rl-lZFgX6Xz+#>-B7ksxP zYqot7TTJ*WBApF2Y~0$S5=A2}#g{6}m6SRiCX20q1`t%cZ%FY2(?K|zKp3$d z>Wq@=Puh1oQzRe{#1-3MMV03;I-X@f`ggkk)MV;jJ#YB{>~iY>Y-H;Ik#0Cg>TM_- zzvbbdbKNIl2EElkDOcaa9)GVOt*?ZpJg8K;kr;5+JU&=dK3L0LtQJn5ZNeXEA>NVo zb;e|9Xb)oledgz`ZCTc|-rPAxPTQAt@X5?+Z?VO>4maHzO8cFz{itlcW-{Mcu04@w zGNIobvFL#RYt8Jtw#`e>U0l8cg5IW7vXZ7$xaR9zVoUF(hZf`|;+k+a>CAN5N z@aA#(h`!c4u^}0rgrL;nFeOctIz0nf(V>XA#W9-EGB8|ANs9{D%e_uO`(v^up4LBR zX(*s$MSsjvVtA^SYkgdv6cwQFTnTB36jL!$FtxzA=Cr{0y2A6Fh7DQZ3FfznCC*-+ zHV9+D)hwM%=L=o+V4>et(XjVi&M@SHMTlIm2yu*>C4CeDLlyQK|V4Eyr+K0#0>~2rDUMPI>9n?7-7tEc# z^}cF>UP6|yV|r$E6n_Vr6s{!zw&&Vx5}g^-~oF%K%Ss5?mbdrXC>dU`+SHu z>ygoZ=Hdiuh$@&!)RknWsP@ z0sIr;Gy7f|kKRBBj#aTDoVe-fli{wh>z^SgE5q*F7DGd^Jb=pmpeRShvU-7&o|17|fjP0{7`y z+(``2;?`woq_2!U}ZRNSQ^&T}>CEE}$ySJK1HYyN@KYn9~u(J-DJoZsU+b>jrPUSvpq zV1Vy%pr3;4CxySY0) zf(UD=(g%X08yz-QGiYM9wjhpF`57u5GyG<=#DrD+3Vd(!4Z6oKl!yUY$C(+Ox=_R0 zEg-t|<(h>L!;3Eli?PRCn}L!=8gC#0iPbXUM6Y5W+&dW(0sPq{y#o!V4<4v?9pU6- z>v$H*5{;n7vHndv#TiG{1VFLZ@8=@hz9BekrQ3|W-Le8G%oMXR?Zy`V5mvfQ%BeeR zIn72Y;Fy94t;?Q*h>Sx&y`py$bQ@^9_S5})`^aN=v^HT|>Oh-by;S*tbWeNgKz2jeL?`JjH%>MPebLrjsOkUP zsHRZ|;$|FH`R=|UzWl1Yqz?ROz&PIr*d(>udUG~iwx%CCP~8a3$C-!dT_g5`EH_f3?5(vjP`Z&q}(yPH#ikW>l(% zG@sHsDh5f6ob6XXtjc@^^`pgkP?ifNV!DDXUZ{J!3nI=z4%nN=K}rUlY!#Vk%B25- z_?An5TEcmq(`m$Gqp(LOAyb9CTg|GA9AaHk!cI4M9(b>^Utp#U7FuPst1{t6h|)HK zWVo|Q;eQDl&-JpE7knIYItEl1sNxTRKi?vxJ$a=0otE9SFr=QDJj64@QMYU|6)ig7 z*`V|NYx+Yt|Ic}Dp}9^}h%3vWe>?`!`)(A8+sNlTA=Eh>=<$okIqbNaZxr*PFhd&g z;4d-@9--osvOLR`f}&_9vRX~)gBYG1I~S-$LJdr&vJ0p@UQGpkF_8WSeQD~7+;o`F zhV7hY@*iZ+A+Xu{F>^?3)W!*m!3A671y)(TRI$;^t#ml4+23mopL=9zP^S>E#j>zN 
zOT6TI1;6H4Z$Y))E%;_{1$v=JCqhp%=;zQ`Z4DYHI6!WWaW8d74i%J0+AViB?%s2A z{ONX|?evS?a;oB7>I9l|GLL_wy>@XoZk>;7(&nG`X(=Vd?y|KSF|!>>#ab(_ZH_1( z?nDDahCVo3VZSQu|3&ZTi=4>wOu(p#NO5A1z?5z~lp^MkJs}j%H`mL5u3GY^e;>c@ zbf4>4r^x!NKE6hRTT6%fQMJTyVTFh#ZrYOF_i92eaY?rzwE~EESHEI7|5ay#Ds3Si zJi0!}z+zog*2jj`fQePtOOm%Y^^C^xZo?MlfM7DokRXhxYVL&}Bmtc}42lv!oJ94? zLMqJfgLYXRcxxfFaXkILED7RNvX?vFo3_+qBPc_a%jIbPE<_T<`Td@D9gko=f5UK@ zh&rP61Rkqo{I?T(FUY=4*bzaX*u2+^Fax^Crj41qd?U+9H<@M*Figj|wHIt%BK`*S z_lk}e8m4OwqM=i(N`%iPjY31@vi}Zjh{d0cNKA>0S;y(qxxWpS`wXRI+1K@OkL?QzG1|vG-bpqJ5;9s*=4ncuXCYlMSv& z6I}H=r@snCbJ%{zMBEr31{N9d(@J840-^*Qz@g3#IP00|VGx&1y*Z?$E0H>rqFbi1KnRrH}V8*Of$@1o#jtxf!wUF0vZ?63p{2wRiI; zSITPp4M;Zbc$+XqLMlZw0;uJpRHWsikyypj&w9uYgsr1as4QwAhzgZ_dPriC3z>?YDSKX?6A74JV+#tIpMrgyUGn ze{3?4QFmCe1SvCSQ12w7rG-FSj<<8$OU1}ZzKixb)nvP4Aof%VI4O`Ieh9Q!+IP?$ z8vUa_L!zEK1q>ir!v4L&XEE6DRUd~HMjuCHj~U`B21lp!IhEDXg8|^iB+vSznjgE0 z4_$-IxsEeiczh_^T{-qZ|43m^f&Z08t{bdS2-Qd?)dkTt)~WBqjiS+c4mC1MJe*aa z$CeoQq)Z#gGC3~6+@b0JOi61C;T3*{1{hze%%d0ymQ=sTe$Nf4a<= zBZ;knC-o1<(hzvlZ#rWnzz!Vz@OHmj?AH;E`ptW5IXYLcmJqVf1lsRjj0@N1 zCrWk~puF^qyY!qRYqB_3kLJlm!j3OU(<=v(v&fX|FmgkFp3JCExym$RE^Y$q*C~c4 zI_2S>n_+#^Ao4AG3rKY?kiRBF5u_#}*J z_B&nKtoF5=LaRHE1<-Bu(E*d>K-M{OY05YAj!asjmky|hNtY_ql>0TY>xR>m4|7^f zxOPLDE*!DgKTNwaFUO-`3vQqa7c~8hc(V=%EuaipkCPzOyRTXsSt}2c);tMEg2kJI zPe~FZ;AQ2NJ1km2M9_-}b_D*v9f08H*j{$K$3%$-c$i73Q9#;%WazZg8ayQvDKeEr z{3$$0qU`Xd>UDoH5!&&5q4$T|6||5MkfvY}bsmEcI!K~L#)g`veB$o=oB#K-=Mi_W zhcIVUYW{D6#!))_Tm0Xfy1(t8+i!o9afb{n?ylkCxnizZJ5AS7R0{?B-&R}6i=^U2_+X7u=b0(~cjnwqDeLlMx;9+c>F z?jcW3*?uJqFhP~7{Ro_LmU|i1frOFzNq0Iz$lz9C3i+<`1%{crH%S(9-~v-i)ABK@ z7ekS9W1(q(8}lGs@bjlvR}w9=4Cw~53%r&{GRBXpOw5lDOl$N7zaT_r^zt}Lj!vFo zkb=L1bMq&R%J}N+^Rq_}f9yEC>7g$ulvp)`uWH3Q!#!Hj2*nTrN(cm;_Oo5Jx$)0#0fkDe~EQRRYMYUIQ`ct;Y*4TT`^#H*b$9C+M zu7^Qu8#{PvZR>OX+#6J`VY*?*x# z=Q}$e`Sr(?=1W{NIgM^hRE1S;gkfsJp3~qIy2{fWI=+wl%tuM-9Dsi}|3MYGy?Xh> 
zE)7TILhW+K!e3F7>Biw4>SA)6LH_W!hOM0kp}^D7+~LP6Yii)s%$4q06ZZ0?#*I7#>^?XUM!RM!%-vxlO?+soN^Ccg^ATpul6%~?LH_PW%e%vKDdAsm=QAH&a1T|en&mS& z(;L+FuE!RWOOcD&anNCEt?|xOK`6IPNlw>!whE9Dk^^FE$M(p9qqk z*(EcY=y_FtZ`Wzhs6B82vMa)hs`ddCDku;i+R=|Tqss0J6Sm!v=lXWtxF}cT{bE7a z86*M^jnKQeW?VM51su%3qlTp-yH6cdmRE^%kQ8b zeG4YB;YrRHnDD#p#Lmj^)YZ09V!5YoGk&Jdwm#e^))u=Pjb!wYc?y6`;bNjFnDHJ{ zG9E>g(qPtl>NKwzB-t2{pxBNQj0!1pU6zr+A@`^0p(a~D!LTk>7BR6CuD)c31p zh;x4yMT3m1M_gPQMe7%{|HwE56`+SquNlpZQS)=A3avqRQ25N-H@-na8c_PkUPf8i zNvhR{Xuzf^_ZZ3u*svD`@#YAoDt8?36UEU(k)PGLqx+A`DK|B7B@+qBPp`+AL=^C5 za$h;QP7)|*G?o9D30zKgF+@kET2qA-Gt2`sH1fKb0ps zx8&qrTqIm`qiv%QbfHjZt^qr&gZM{z#=f?qdB1QuZlr`zsmpeZHP3`!zuD5aY@gn` zfdkgZF-svO$I9WFTJUMdLWoA_L2JodOlW)Pjniz-kN{hYBO@;8s}Lh(zFF@v?zI`x z#UH>Io#Qcm($z3F1zKivh<>#~?N^<6EK`ys8=w5Ri{hzZ;nZm!zjQ zB;K?6^u)xM+~1nEpL^x$>wi=V^50%)+f1R2=uc6^b=ktLJW3Wlqh#cf8CdLOg~OJkv6T@7Uk)qpP8jIoQ4VZSx;tq-}Y|44&br0aZkOW z?L72zbo49P{MaftYjP=5p7H}g)f2(EJnwfXVH7)(Rqt`A^p$R)GC-kX)}!6Va0 zTc==?x^Re@dbnrxS!aEgR4vzzCpQ!TT3HS4oMa*TW=T8)NnIONVq(8?oz~4QTA40k z3EA1Mxa&09M6;H<`M}Y^I0mhMY^^Rw4+3I1%`8fZw%p@KfxEqBP!E6!8vY5D52Bna zuDeqAh|ZMMr3;8$FEKB}Z;%{6@2f~lNMBh<60_7jVx)~1tqv3WB(-!CD7hUsQO=>NCd)3D1E9-!A^xmd#*8*QUCm}wr zzRcP>r%YIr;+fe+mU@8uxvp-0FjN!9(I{EZSmO&+ZZ{-U@`aTleZpAYV*4u)BFMzx zpIU6^wNmFX&DuHF>1pllnYhroZT8HGUp>eN8+#2buQV2xm}+CQ&FgIP*`M0qR4C9A zEND0OnTLB(T)H(xPnrqIZ2R=5pZZs2x-~>neVOsgJqfhR=!!=x?o-n@JB_+48K9waJ}fc#vN zgS?6SiZ0awM*sq1H^$_Xg11Lu# zO&AOc_Hg9l7T|eDtjwDBW$#s{oiKuh_K4m#gbcqP^MHHXsTuF^zdEP}Kf%NH9A@*7 zpghDovapn(T#%3dRgt(fQ;IQ}z`|<+`91Y3K1-%cZrJ#i5RnFC-YgE$Gu-%73Af0< z3(5_Oh%@0OCI20c3gt@0DrLJOmp=8U53*+eHVD#%VuiNQ{p-jA9?=6?1LoweF8CpO zO%GK6^|Kq=Vw-C{zp0Ilv#tzVm{QBu1#70y!XKr8{X+-c(!4Jf!_ZW8B=OINM$;$t`r-5ViM6T^|=XI_6Q%8I^hE9=EH3|DF z^x3&y1@ZHlUwiEgbbjl!e#Tg4WBN&!`TF2tJ{egf=F{<$~`MnW(fY+^?qw#0_R%jk0|HQji?xug>5 zSYnH#MvT1+@jQYkHnSiS|6!YNJ+P>!uL#e>g+yAa2&vjrptI*n;i~4RB|3tQ6ec$O zX}RP*IvcEMHwr7^L0x#li`50D-licM0yJ-v8#U_T;r?!Z2Q@2NScsiZY*`}6QnR%X 
zEL(dWFfixo;|()t+F3D=b0m-E3fVWx<1yKYQ1+tovz;e#TCa8zbFz-mz;`(`QHhxMm;=}r) zcx^9d&x0-VNRBx`OpDI_#e>xGg5x){F%9wO@4*t!%xsi7iZQ$S7ZqQ~dFG+iA~|Yo zZPUfGLMXoq5BMq74CfD#(b2GqC}^RnFr%rlNPULHpKVlS)vR#pIG`=+Fg5`_aIG7+ z*~JtdeI$>hOK4+_o+8q+ZwBybCKJm-{?VQR|Bg>F4{rH6r4LFXXthWwOmKlv-&27G7jeOw!aPdgG3TOOOw~)jd#PE|-PdNkkz#Wwg$07V zK}y;=0L!G;Y`sXeIrhomczK~B3d?(E0f#+`81o#5LcQGoy3cS@n+Sslw4CNk3YbX@ zA{xWdWenT^L%eg>&m0amz_LQC_V5jjhyquH8BmJ)ilFJT7|_)9qJ}?k#Z9Zh2a7+5 z2Gv%@%H8@~E)Eq9nd%0}#)THoM&#EnV(Y)CDoAwNr_4nZ`hVvA=ny$Igu%@Y19;z! zA@URz0r+xb83Ajvg;?--@2LpW(zicp?M*(6OB$MUW|SBZM^@lwn(RYg7P8pxfb(ExO?XaK5rlguXJFXcpl zepZQe@FZu#5*#VQX2kmw?6stAP~GBx?hk7E^sb)a-v1jooWG$xbSmppBe_e#Z%CWS zy&FIYM~|rnM-QRiFAJ^SFZX{2R}ICe)7j0%3}%smOz>uJpAUC!N`aZfMP~<#SzJyr zK=#V8Ci5}R^&~tk7Mp8gQ<2Mbmx3uFNK6?=5>5oOg96G7I9Bv_ePu6#0pOur%%K6F zMZn)W!qOGG7bQ#x#kugKJ^(r*#gv1x7=I~w^XRw|k6c%9g%xm8Gy7?LVj(4osZ<<) zw+TnNGK{@QS5^~AP)@nMu9A3qyJI^mq#-DArloGXD)*U`bH$F%Gf9*m4sj6|w{31b z#WSmsoFnCbA&*3YP%jdL2AA)tC(0tJCnzdPHb4uwGcQC;6L$S`>Nyd+9sEhfcP^lf zDsuh_CEt!w6w;jTwnrkWxJ(q;>F{QFYY`gNqov0_(qPII5+CDTn);&94}?-3*XDVc+%?VW$L}cc zPT4_l{vBWD$?!q@Wvl&g^ZjLmW^$W5eM=-17(9>TYIooJQq#Kg)?zbfZ)WF6?v?2z zkJAr~25pI>v&Zk%PB#XRwjGc^d;I*j3RxE?>x{mQ#v!Og=V|;9HvB$UJI#9v)pjZkaoKO6qE_x%-4Ga zxF^K~$cz?UEbS)NbSM=(DEm4fQny3&kNVyLY2|aHz*43&J%{Pel%yVYWGMI*?SyH3f;KWU9kDc zTpG8gbfJbIL+2-w6R}p6BYROfbI^g#Ow1Yq`~=Q_AbPG^7EMqpLqCzViJk%3KH%OX zO@nXcZJ!vCHE!b1Rce4Cn6e=IniBFJ%Y^z_EO@N^KtLmWl}de8K}kZ|%_i$oJP%#j zaSP@Ku6KrYPv!>Qz^im;G6EMrdw`9_@NR3B9)iawOTh;qdhqoUpk%ol;`p&iW!*l` z;_Q6r1^|kba}G!8XoO7)@C_;LF+o|ty;+Z!i-j%@g8(3^j-P~uj0@5&ghrsgcEL$g zE&uyOqe9vY!KrXi7~^Da0OKU6OvR-*N7eZg!v%lZgGUTm#DM>4iAD%-f@tONBN>$L5LCw=5Yl(|~&mXPw`E{*Ku& zBkN*eTGv-p{C+am-f@{#SZGL5;Zqnk5hL)Ze=Qk8lqV7VSH46r_J($VN2Tcv5nGL; zbBQ{Pse_uzVu_#XN_=k18hdYYjvDvt+!qVR5RbNU)n8)A1MBPuGUmYG!L8Q?QDEhA z2fY!imZK5_E(S&z6wlM(yu`wzhO{e-aZ?kqs46If$YPLarWF$gsb-$WO^cOldVxya zkFvY+cz`e*j>h|24Ig%|@jOmY)Q? 
zm&VIYVeBBPaJY)7T|WSqE5*XHMgA5M8yULd^ZcQ!xVy{1wWf^>&en~y zQ8>KKzOm+@v`?CP9jQ46Rz&Z1vG-iL9}EmC>}pq{}=ziQG=#*xggqA+etCWMF8p`++yVJwfj8^Zf>W zhQ3en;%l%t-u6$QW%O3Y+z8_tSe$W1PK0svKf+CKI83KYL;C_-Wpdw`n<<97ZkW85 z$#C$^?+&nG+5}2r_Ag@2*C41lmKXSzEKOG)KTtSF5%?p97ZYX-Hh%PJd$A{Jv|o@^ zW6Zqpk4I5ZKCUHG)DSi+9vtcix={Yq1+y4Ky>|HEYbH)EyEe*6<(_>kB_3 zV!qpz|GBL;U3Y!)km9j~zzK(R8$3v=0XJm`dB^ndGxd>d^yBuOTQm-c! ztu4%v7XiD$4-!9CoJ0Riy8dNyD_Nx8=4?2-hbOYf=I;YG@v8718`CM*!2HwL@t0WW zn_WZ}$hLxHOObGgs>Ac3q>0LYAIG|6SvXv=g;u=THvWH~WGw zj53i&2(qaKQ{SsCWD0s#jH)BG5Bgqk2Kqj%33?7V@J8lH8RGRCoxJ%$hL0BVZff*P z6tM}X(C)~zAk?m-&l!L5@EFR*Ept2^KixKY{LDlr1YxWxX--&XGw{arRQ!6)(V0nRc7F#ovBHUCQ}tKr*FK>IpHm;vD-R zV9Xk0X}qU@SWPgiYA@P89(|sx@e^x2_*3blKTM*IW!I zrfZ~q7fDQxqMT?+R14AAfl0%=-+;3GcCXXdaV^gg@ ze#K8or7>B4#ST1u_zLtV$?w6Ntrw9w9kaFHbX=b0QCC-^bW6odpsgSFAyUlFW>j;y z&Y{%^R<0<<2 z&QznZxym5cbl!I;0`sL%aIF)147O(RqaYf0e=4LI4kA~OGn*MgE!)oA(k#D2?-_m&ytZ&0rpSD?-RNLAtD8^KYLv)o_;TwH#=Q?D(!X5Z_ zoJxXAvb|!aaJp4zM^bIuM{?=#h&wUn8fRJx_$_lvE=o$bj|ZY*(AXxlGlopvlL2Cc`7*ORRB> zS81~1R7*!uG3LY~KV1$HqeKF%KL1mH>f?6qRNecJL2*RUO-DY5&3rPl7&yE2j`#;bwObRYW^II|Bvu>Ym#eaMY zr6NIvnEpWuIfl=wuI(rQZ-^~etz503ePcJ?jFq)q-WH~vrGR7|;l%`d3b{H0?Tz1Q z$G8eP+_{Q!_B2{#F`XoyiV}3u<>wZ^c@KdQ%gt9-BzyjD?-+`DxbCzX^dNPwUI?0B zpB3^8AY@g@Q^-8@JN~T%OtU(+ug}$n_xAmkz3$5&4>e1PUc)6?%!?cjV&WU)HB$N` zEvj4DELnqEwF}#k7Ok0yC55h&uJN*M>hA}j%d~n+#+U648}RWKTH%GN?rxEs_0|_B zVuSTO*ZEUllPnvJY}}zEs3ul6m&5glAaC6IBAp%b7bNFfmW+GhE9jA<)5>W@V|Uumrr zjuOyO3w!=iEvIT=7gc2A}>o;s;&BK z(^H2D&#C(*L;sqYy^DC7YA;J0lEb_7G;DxSq*() zl@dve@nGK$N;Ufc@)>54vTZms8k#msf&2oS@whiaorP~o^!EDd6VQtlAZ<(S-u6p+mw` ze~Z@lmU9aaY{FAu(YnG>vCzXrGqt$&sXO~$R%0^x(e=TtQ{Ef1UJB%0YWk41dqSrK zUROH6ZQadCO)J_<$2A(D^&E!YbqLC2FLOw+ao%HgW!0MyM<7B@H{7K)hQhN4Gz171 zWT_rX)SZWA@gB!JR}ENQiL)6PC6rB=W;h;aWx94n;LaJPfN7XcaxSTCXq~zyxOr7X zvaXLQ>=dLV`R%gh7@9^P6IYRtCq^BdY%|92Afue#zI^Ky!+6*?t^1oOxFp}Bb@H!0 zNx9#&7OmbZVmY1*&YpM9$tiKv0dgZvGwf4jdflCG{4}zd4If2dhqEzGO5aU^zsv3RS8Mg4{Q+Dstpj?fD>B<^1 
z(MoZDQmhTkM41(3xP;#_E)9b9oTkL^$EJ`s#%%dhhA%FcL#T4#)|sU&?1U(Qz{!un za6pS;%}B-0s3(SXW(5b><=W~7*yMLW?JzyOhHo$i(fyc_yJHazL6EGN$;e}wtj3cy zUNR_vHMHZ`r|W3A#~O9)MCP>5)^5k9+NPhRCap;hlQ$=cyz(_ba|a5Xlhf)Xk7Te@ z#9JK4`S={3TaRtEW@eXzuf4*NBFS{W6Kx*hRn;%Mt{8AbU#tYR!9CjYjhoA}0bu;3 z-h?)LvgYaB?SU-sKF@8g)EtRY$Vqwm_|sY?a@jCHC87L9A8v`rMtzaSC_|w%iL(85 zhQlvF0XYE*Xd7@NIsb57VG_E=-@mK#d*B{N-G4awv#5RGD35`PWEwaEMN<7@!=p%N z)ZfMkkoEMdGT=?bvp?BYkz_>gv|qDk?0bj&M0F)Z_@{~yj2oHr&eiw|(PhohOTQR* z?4U-HT)>AYx8dNNBxMu5Oi;gB*K3OpKB`jD7!vZTZBjXrr)-x$P$ zYq9a3D-kg!B>ry+5%OHA#IYR}P-sA@`@3Ig#>zmOKd^wvi136O=IR8yEhLOu%7}#dBfU{y{m$o5CF3`lD0O(jQybwTLG!ug#UHDK+SBcY zG=7g<+|HvFh(N{j8%?7W)h>`Cfqk1R#~<%}ug$>!d`M>qhyj|0>ftnqA72rMZFsJo zeo=KzL_(eD5IZeNQlIcX^zsh(Navd!LKDG9?+VkN6gpmPfis!8mfc2##epFiirCa&q5;P@-P(3!H^~{QI&YCq4-?#HU0>cD3ht|-0 z5s$gjX81z+xE%7fvScrAVCX}jY2L2m%9IgY8ev8eUd+Y3&=G%8!B@sB9Y}kCoD2~* z#a6^r8Q;7az!E+u{Em2R#m$Y7$Z;v$GghBt$<1rcq#Ry(g;!*0A>*VHbHlWBn`+knJ1Zcxbg4ojH0gRFESKy5l$N%>;u~{A@!Me(Q~i=r>03}T;>mX2w6YUA z$;b|Pk`Rq#m_bQ*77r~4SR!djBiPGn=SHTLUpxHNW^01)Rja@hL=7R*M_% zRin2mX?*ga;2#r!bi|%E%XqH76#2>*`^m%MadXdlAJX8S{cDAS1ZDP!f`3FV_>3)_ zNPdbG%X;E1k8!Lt)Yj1n+~*OCIN9}f9+LX0&s-_7bF5M|)i6A*W|noJMYtnbH@y*m+hAXyA zi`%k>m zR;m1~B~(j5_4~r;?MRZ4V{oeF{$w(nr!F>}v!A@ZrwkquHx-8Lr^Jn%e;peiy(FHa z@)HPnd?q*Z4BLU#MxdJG&|^d(du)%-BMdR#JJ4e68JRXrb@=&g6D%nH@^ZY^U_Gz8 zqw@cd_oh0Pu{Kl3SyB5((KnV<6w^`V+;Sj{|B?s`PH5-`_cKi!pOB1dhMp!@8l_LU z47I2>Y8O-4fZJ2y{c7O&eC}YI7o55n#Th1@R-^f4H3E?999891OwxA;nPk>J$o_wX z8t2=M99;EYP|faNqiL^!B?-&|2@!8PBn3W%3joLzNX#E=&0x<=6aDyp*0~elfA6Gv z_LrduqqPh^VIOElZly~JS`NPwMEYKuk&eRG{t!*k&xnM2rr@3JM##>qkF7)NmGDPq zAH1=eo>r??5wOnRT%RXD-rmm{K4^^Z26&vc`EKdhfrX8+i_I(-%L)fvWY=@jB54=6 z<2{;9^wJ-@Zjb(^Zq;RP0pRRf>jJ?=lk~QT#Nl>b76Yx7&o)YGpm#z+|v$Yop)uoL%4;galj9fzIRlv0LY6#=J^KQNlJvK zdmNu6E6r5{wQ7o$c34UnmdyXrWlu@3XHQH!XF($8?=B*H3H02`fKd_!C~PHwy>wK5 z@^9|qlHDksV`KA?elL`ikF8P(#&FMtd3*j83#$-Gl7tJAhlXArVCGdfyEg3tu=p+D zxf(IxfF8V{21Jf@=~>SLAARJIVcp+fs6&Rc2>}0vs9wfzI{HQNuS^*=dg~ArQyRwE 
zR3b)$l~k|9e>GWg6<}l#X<#&dH3m=e(JTy9E>jn+siEj^8A1pH$pAQ3E7_UL0~Q{L zU?6~SU3Y%3^86`m)~xFaDrEbAoCY8LHop%Xt~W+f(Qg6ip*V%zDXW0B3cj3Ed=g_o zxy5klsnTZd_K&Vr&3cglO+eqwu>z!+Tq>WkS{_J+lZ&2lGN(ARfdsK{x{TypN7JjM0$)%_~chw;VHR2l4KSJAxU^mKz>hMJZj)qp_4`o zM9xSGY(B|GD?rV(2W{raT2B%A;ZD<;YT^Ms_NoonP4utc6;%OmdQEO_@X=pa*)Ix=y`&`qmO?3qB0a4|ay}37 zwJQTS9h78_MZPbYg-R~$`e#ii;`cHX$)lRiPez@g!s(*LWwwVwPx(8l>f1nlgtOGw z%0D+DcR0j*PYg^{dR_U+e<887PljpF=#UJwJ(>oTXernAh)U>wVhC7tF z)kG#+1rLKTlAG4}ZISCfQ>LRtx|EPCsA*Bl(+ny@Xg7uNi3sR$OJ6mbaHwgSEKk_j z`^$#Se^AFPc0l=Bv9S{UDbDH;9q1K}XJ3}7ik0@pQ)L~zV z!RsT^^1+C))%XUfU?@MfjCzFwuFlN9!gz{RFmoxD;Jkt#e?Rh+3QBSuZ6|c~SMoW6 z^pt*_Mxw-qnHjaq+_sYng@|Bm9OkkrA4(lc#Z*jy6^U57RonPu)JL}!d=NxZSRKO> zrIk;&)sCVunc{u({w$%SUUcll^jEsek7alW3ZwD|7-=nnvSAEwC<_ZFY~R#(t8A%@ zmQFBnM@~#m$F>lRU!%Y!th^YWSKG&&*hU7cLWWW;R}!2n&nZ%weirdaQGG1Mi|MZR z66*BL@e8CO`)V>Y_(MJgBTkY=tOjnO;T4Aoj&huIBK2lcg-}KY+q35ialm&7Lt-Lq zUXgLOfyInw@krt*C#G<}=$c~cO`+qNBn~tROw%<(l2cqUi{G=m#WjO+66knF5HR#v3qbWmQbD1cg8 zKLtSHdbfCAvR|u*q(#mxl2nX983`sXJ`ObGxyQ=Mt5xi(YniZ3M245J6(K=1I81aq zu@}_%#Ch<{I1EI=Cd-TRFy9QS9_CauJp5EjuB8Uw*nq#8iC*9-f?CwyQ|eW;=X6W( z;Ely91aF_vN6E#to_EvQMF^U=ONXE&Uv#TB;v(QF#^rXGu!{!+jhF#|iOF?;c)c%B!WGH`LMvC0>ae)Kw>dd|; zo?9x8%S0l@B3(V&-VUy)f-}0#w4wPT2CdKy6 z9=Ll2ls-F&0iwQO%&X$p$f4d)FOaXLk%BL$3D{7kFuw}7S(mlgK0?q~NccfEt_Fc6 zHm8GuV_NeDJ=hQUpFF5+2*RH<{Osc-+j&QqF*wc3Em&MdLMHiv0cn~S{^Ly{#Y&w% zp*V)o9up;^CGuV?2dRxS^;wtBIz4Xr+|RwG__t<(U2rqFS~NkYHCh2y`)gP;vmIQs z%s|L^d&8a6W+*Nz}$Gx)hKVU6jBxGqa@zkL?r&J^8rYHu;dVq;8d7t&`cSF34r* z303^^bW=B`2P(zz#V3 z{+5|raKIiQl3KJHd#i$WicT&;-7B+!h7)z?Vx#_)uV2~f! 
zWT1+qE{xQeV=H+u)(`|-zYKV5+&}*X;Fyz~ZYK{awGbzC^cx0JwBiiX43k0~f} z12zXK1#;!85ZMPdJeJ`6Y?6cXHk7iqB*I|`qr3@t9c@9aY4+#RE27#Gws=SKq9>Z# z6jqE|Pm&KoiIgWKdRIun7?Awb^cUT}&|8VH5(i7xVzxMiU;r5q{?OAR4FJ(pDT6C& zoAWieM3H}yajG>vVLF58A5x3)?$qNt%S(m?ff+AjvwDjj-~A+`ZCB$ zX^Yhs#@)-p@Ccj^p;auT8xLmFvv}A~xk8ick=2kxA zj5??D-B9#;u9jCm#HnyVS&>$pA8dkDOkqefI2Yj)Pu)p>X3eCB;V3pOX6JE?auyfl zBa76j;U?~>y&WE0KHCj~o3jfUsWFBMRaFKoD{x1Vf^k~;i}KrbUfmzrw$De}*o|Bc zF8C>L^=o|Jbivc^i?KA%d(#ob3BlpzZ96*ZnP-QzH&TCTy=4Q?5xX`mP0{&P(#*J@ zCyA=YY`vxPgIW`{PE^e8;&i0%o;EW{8Jj8UQ$;K6hGso+s=^Z^DaJWka}qJWbjYAfP*qxWPDtKwcZBx;p%)vcP z?8g%6O~$WMw~2yB3Sl$%-h{lg6tN4qUv96TebpBjut6~5cy}^XJrAvqQ0I^%dQV#i z#Lf%Eo{E=B`s*fI$?SZCqLHX}(6(ZCn@5~yp@2R8pm3n6C>3o1DBu8b9GG!L*$h+u zM#x!ufJ+(LCd|qkW^mcZ6M*m1Vb!~o1578 zhXwEo<==qURr#Wbt)FNP?G6Dq1C6FnV3XoMT=yFql72A}#KWL(NoXHY ziYSyJ+}MsgTK|O&itLPj-3DFOw*je7bC7sAt_F!Ewi<~g7!(X77^RFo8yTqD_+0>D zqSi(bgMp}FVn&?BOLSg#(v1(|K*hA+%mK=SX1#N^A+l_d%delYM+r{{g!4+) zV&41aU_=h=fOHwQTGJ})%yAN&XmJa0bPp*(Ys|p14ki8D=zd(DK(1i@_iiuYElo`t zVQh9bAA$;#W&;b8x^qfORK&o^QsG(%0N)WIv;W4(rvyji1E#|K5=vLrtmafli+l^x ze}&$D(KB9t>x?)$-LE|7FP_KQuBTC^J7F(u zz@CI2t@x8%s+AAB;rVasIjBn5{&y*)vMZlP zwbpy`0GlvXleew_dH#!%q5-W_j(@8lfPM}0n3{jp-8XrC`v3rY|B5@H09JENuQwoB zL*$@Z1>Fe=MvQtW{f@=}Q$UgrDXH+oEcVhV({8WC1y^__&WLH?jkIZS$v#Xbt!9?CyE^7@K`MLPZ_W>Z8jHl5AhpV;8MQ4m^~MJh6|d zDDY&eH5Yi(Wz^?7D!^yt2_$`OgNG><=lg`vtglDd+3P=1f6?u!pqfrK#@k2)^yR7! zpP=(wZow@$RHw!a%~cXltLB32jW+eIR*mn8yiy(6>%edO9xNrc%j>gi^{njPSPvN!ld zf=x8>x!qK(QhhNp1C1xPILNranKh@|iW&<5+}^+TcHVEonUEl8#mnz!a(@)L`f%KC zIl-V7+)u#U7O?%P>D)`^Mp%HvV*{G}w-@2z!N1*$P>^fiPs*0nASmFSSA3uS$U1B! 
zSAD~drx79`5=9LZ_Wh@>G(Os76^44G-w9QFBP5&&K&ZTpr_5pC5R4t1P>FlU z127t*mCR-|_R4JHM7E7Un$&oM8OceDeT3onqVfO%^I zu0yY2J`b4XAktwPV04pzhm*|@RHJ}&pc-954pKgdx`Q#lS7wOE0OD%U9>M6WjH0f_ zxE~ONfY>(g2gEjpalejGRX3HK_`QK?J72Lw2ml?xS3X%-Il#>_aM;=Ymq++TfCqR4 z)!^TjHwSR0^BWDbKmRosTA%sdMj4I>HwwvlktEK zoa?;eyzRzq!|6Z^2;Dv+N;eH~rZPK#@d&wz_?0&*sTHt5#|4iiLP$xWyoL)WTuQM~ zUZXKy2DsoB?BE+fD|CDYM0Wt%xv3NHjFU2%U!8vZW-?i^{o%UFi1L~$Az*FIimQGR z$B>qt2i$NF?fuSloKnXrC7+Q|Zb!*F$anjM&rMzw;KnEO`y-2V8dzgEPmEodXCH3R zD(IjQGR_Mw>?m*`d8e`Od47mY{05Qi}_t4^V=gxF!4?X63U#BjkE&&+S5NBGRecBPm0RZCj3Wn&gjm zh_ZN--iSMTjwmmy65;}J(^rzzUFm7;<$95G3MaD@Md+gRvm0lY3v+#QXY7j-vjT7K zJqX1R8P|cVz?jmuLy#6Z#F9}_c`IN&Y66-Np{^>&FH8#sBIg7mFG-YP^O+V4hHC~^ zW$eOjoB(~dmE;9-0&3Bee8`q;?Ue`2WS~+yIhR*L_k=M_=Y#_ekc5A0ZcA^};w+&< zvyu&|((lX$XgedyjbJHg*Fe@2ALk`H!`=ZUi4!;&C`o7OuQ{KjVQ*C8ztWJ1lXif^ zEQ$^v5L#qvS{O>bQS5*02#rK<6jm>fFDD=jkyE3I>ZT*YQ8oit)*PqDA1r@eK0Yix z>OEB52Xk6nWFN3nKIcB(&J!^Jz`f4*luO3)&yWpDar}Bw6I7i60QCQ8f@Cj1@cq|< z&$2?b&)cMuYT0xZ#t~p)kby+BMg8tfGJ|(kU{n2HYmw1|iEaK7=%*nZXLGuh$EsFL zl;jgl{)aku>aHeVrFZ;i9>AiIYomxZ|HGcnee15{S$3Mh*)fZ&f2AV;Wq*?M;;drMxm%C*qOk#KN5K_ACvy+2 zzGcQ)x3^?Ww9GlPS=Mbw2UugrV;M(NWF2=# z()_(_;y%G!^b(x)Y=t0`KgGA16S0nUm2dvznCp&)>rN%!jTFe%v6{(Ig!0&(#t5@F zP)wL{3#h~)Eu;v#4YZ&5jQO`ukzfNvt!tpJ&1<+Jl1mDgVYfr~i4RN1QKpozJt1xb zLEiTdxLL$W#p*MVTH^#{Y%fdLOuZQR}(Ss(uV0hXgi|KI?n&(TLZ|L1GV7zw?fd z=ktlx_N92zy&M`fq34ayT@EOJv5Tus+5#}FP9}$?`{g%zv9bkwjE!1)_|~JNm;YJJ zEdYKeA~$7auEarM)zO>&R>NpjUmlSIkA(mckcb2g;Gh1T*;!|=$vMT;z~2_r(l4aB z;b}`5PQtAAhSiokfoqboa zaVC1(WZam!S+0_NbD@_%Ir(oeMlnAGdc0jdDy;3& zB+U8Mi_63L0<86luX%oCdn!{4W3ZIlPDbEV?(Dm3!$HOQ9S+?Nqw$!PP$%zKPF-4J zHUguC^dC__911xLd!~t|*5%FRBf*(n=-GQ{Tp!*A5)01|T{)C;Dj%wuxA5N+>((Q2 zd~wzb^NfgGqTn=GB4#-6#ung2E<8;gOMw*$ObJnzmx7-Fc?zp(k5eUADLhvt) zyxoh=?{Pku?WV++t(Mo7JrIeK)5V`}I1Rl0kWjPbVZ;|QZ?Vi~IKP;TW!&O?o)A^Cm~xYYJpM zLu^zlNALrOZPQ!l(tEE?Q)~ybsNCy<*&|Kg#pf;+uzh!9_|-z%!|gs{%f7Q|9CKQB zez!ZS^Zo8)NBAMdb}47Q?$2Xi>Zx}Ze0~pNzp_)Tzq?sgw(f^E$AIMfh99q&l1rFK zgJ9t!Dr)&MB1;Cezyd7wh2fS5Q;L 
zb*A$UO8VS}}uuU9qu_zKso@kfmD;VGnU33@x^0#hAQ9bqPMup#EM@ZV19G$s5 znKHF(RR!25O?upn@+2>7gXxrNnK`Y0{7MqiveOY@pW?7GYL|pO`L+0WMV)8m8{FuhbMY@K-U7=u9qth{(;UB42PRaRTq?2|= zfTIQJ4X;uX%zt-LE1G&@<)K%u_{G9#7TEVq<|9XqD0)%?AF?c22RY8J#oeL`8?WbB=+T2%$HeFlKt%kVRK)H8EIma(Bd|3-XYke*@V@5g%zuI}U z{h4`>#zp6?gMS2~bQ{Qat~)0WTl|~Pu97zoL4h_ZLCPbz# zqcXWalFd6)iGWLTh+#a~aI_`TwCg34DM9b#F|Fn7wP|K)$YDy|jt-Q%esgW93YkIv z_b??#oV}tel9*vt!(ZO5{y%BO-Q-)Z23VKD7GaOsQkx+q z1WFSPOkdM}y$X^KJ@|N1^v*=iZ!-6YAW4O1MMu%~`(G#Luo|V%JaG?)Frx0j{Dcrh zDTZI~#u%xqc=+{m2V(_4!hYr_lC|0g2l&b_)q3b4UHnk-DgCxh!M*9BGkr13lp+M$ z>q8Pff*tV=)_N?Kb!AJDn>a-PTO5m7jQ+1}m0t7ioqVam%~DrN`}~vh+QH^iVj7Z* z%X~!Lt|Zg>c%;T&Ho*RnVIk27MG747Yt2jI@5?r)kIr7AAIM z-ctmuS;Z?((bvDWHsaaGNSl>SZtSn2uf(HkrJAUBJ2&wrL4no5HCw6Eg{Q9g^Gul= zyC+)8!?tMCxSnxaj^5ZJ?0u9$y!@N1i~Ae*x{Gbz>cb14#=tR>3zto$U!t^D?a^y@ zLgB;HjpCAx*GsJK>75LGz0`=I9ls*tnI{f&U1~qyOv)xp#UKSz-|2MQQ{T}b`wQtE z^2lt|8$;*4f7CPbc9Cn36m^SEzFub1LBWXI*Eb}2SV8Q#V*IX-EU1x@Fa~qpGZK4C6;Sq zc}p|m;dXfK5=Ahi@G;1@ktp`?$^uXxr$hXA++}K=zbPkQ09`qlqn+fx>WwkTgD1h( zmkpFi?uHNoJ&Cu#LMVixhik<9kpS1BzVq18S=9$)04>rjB8Mw3;A1nhJBCBBnc4JR z5j27%cG+SrKK?Lh2hO4hklE3gQ%<`UC0$j33X{>3}0&*_5$@-4no;a zMyw=MS5Rv~ZkKFDEA;jth3GGil+G8;<_y%331C%ca)?fE) zCY`IXN{hAdBfG?$*P_q)(SBjqqwir%66`)6{)xy-R_iZRK!B}`%JUhq$9Rp zexWf99aO!hfjST>s_qAizuIsjoKmHjvPB&MDd_(#P+N7y-lWmuKM>TVZE;xIRYGwv zrTTqK-dC>ToUQ4;i;BG!As-tZ&;h>IX!Y;mSXomJNsHChj2orfI4(waeyv@I$hSL) zWokF{9tAIn_6nat0xu7ZviW2|Ybk-Rb-x?F-q3?!=uoVmrQ_`6sH-{uMajIsBML8< z84*S!yoc&c5m3NW?;pmm43aBiS9<0=LFKduUho&=%S5v(-!kNmkpr^B4TsS_TfVJ=B~V1>ASQm%^w+E> za1-R4!Me_XBRD)Jxqjj``@KHlbQc404UyL!_^z)w>B!JC79+>PrFsUp=%@n6EBv^8 znD@v8=sNDpBz4=HtOmBuX?|Iqb3QNfn?JJnp6Ed`O&5AY?ZP66!{hK{+qGB{@9W4+cW%hE*Mnk;ey9)Xrx1zV>3qoAh`nLRtur+1 zc-h3CWkk<|iyDX*eXTbiR3UEF{v~9dffAf8=b|X|`eHOF75rLtzj-Wktgu42@Oo&h z#68=WbNyxL*I1n@%YpS0;sc^AIGE?9?e z8LjO6^J}Muo5||V03ahV^yno7i0J$_A-BXEHs|>ee|T3#n-d0LFE4tkxBlNwMg_Hp zRXGMFbMRQ(+N_FJ0}FfR-47^x89w@d0q>&Y-m*F8{C!wE3(~(k{5 z&7?yi89LOVoYIQU@bD3KCpCOHDaRuIQn+UwBV 
zEz3r}W!gxX>0TH3GAm~_aQ)B8J!p%Wm%JQR#m6*7a4s~Jcc&FUPsIMS?jmex<)4jeySD+E8o~Isoa6R=t)OBE)J?785Cip!T?(Fpr9$HGy z@2YGgBt736j>57Nt;?@a%WNFJqL`@GIj6k*(UpzX8WjHUvg@q z`0mmNlV+9?%uNM%`enjt1jSVNseY4qvp;`knQ6AIGwz#Rh%5g~-G)Dtj8u5!`oTz8 zqf-1*^C~a}IA!oKT~F5#)S&uoIYcDJU4N+=--WZ?{0fcKrAZ^B_bA`}aL@O^FQs_q zQxqy5HfH=5WM0QUFeOvQh9MY|>wW&yUnU=8MfZN9I`E2qkFzjX=W;JOL?jLt!oun^ z-Klj?zhGFi*8jut<#)qdH{n6<=S_(btfd5H`*y;nCJz42wKI zWItY4t@z*Vi}*c0K701;pFdHVOHRZKt|5lO1x&4~DP6gaZ$HJaMBBx}F?^tYCfk(m zVex6GmTP@L=0NS}jN0>qpadC>(&yt)TXZ~s>FYxCpsLv;*To`eYkKF^%G6K$thA!b zWB<0Rnf^8Zt6!sKAb&Q`^I=0D1cGs;&WD{DC(h!K$nB*4vZ}-wMHiP44LY67+)W%( z;&!r;oyhI;zgBr$$VQrv(KT~ziPY{Tb)E5a$Ps4#Qt4c0tOvPOTcmN;znhdRI-U7u z5(jPb;hS6`TIk0S%2$8;I^%6^lCjF8N!$W)+{hfQKYv`f z2}kjVvjUhWHi<-bV{&m+fHpa^}voZetg|*3Nf`P^lfW zn0n9Sr;%39XPWudq>o{ri}wWE7(+9&;siUPOtlSeyA!%UAm0bcU4ChPzTqOutmX9iuRa4Rzs38 zw}OdaJVfICwk&sK7SCn!)A0fav;VX)xquad-hwfBU)@ERgvDQXgmLCDX6LiOw~yIl zcRu&$jSqT0Dbo8xQc%YHv|3+o?Qb08)iQ6ZRiX#FSTZYm>@sb8jxr_jsERJ%bytr` z84}KxEH9NtWPCDY+lpFeqO$8~3lya17bQ!a(Ck0`0u7~uB#+HP8pj$Smtz+YbC(5; ze97iAn{soL5Yh}%h^q>0Amdtm;W?d1>8DSa8F^Du#BM)qNZs~raNLe<2zE;AnP$%++y6wd+a@o$nIoAZKtq@K|JZux;M#&EUN|;Sk`vpuZR^Cgy<^+9 zlM|oVwr$(CoqW0XzVDw`uWHw-nOU{=OwaVpucy1GhiJ}i)hPu zauD+Y8(;gD5DQD1E#*m@#~`QlkqfO<>v`~JC?Zwj3*Oi2;&c3c)uyWYr$%i0u9RbT z?UB|mec(bFy7fjsTmDTO3OndvEa~SE(^%cW!f4f2U6~koAmum(5G+xXW#pM{#UK3a^d#2{9yRjCGo3u!IU=evD=gU(+ zVImk^7;D4OoSD|I?Q0m58=30dUL$Q54*V%S-fMMHWw&`{v(i~d2AGVlA7Q0iZo0i6 zGqT4$vPl_;=~(oVzZj7ssJI*h69Y|(408E2;Xyt|u7OB%b78V|p8UWu-k zUCFN3SrOC_8n`iZ1PSt2k(e+wpN;3=DqB2D_{VhjK7w(COz+u1&eayhUtFctak3t) z!=)9v$+aG`=_$w#FmCvrt~8+{#DcCSRbzR3l479DbE-AYFdd(OV}MudvvKuDaNDZ( zPT=*!;UH5RDTCs?PxXgS*njxM^@mSRj9#6CknOhr^@l~*wn-0q9-6HEr(rfL*{Su! 
z!=Fy;P0;OXLnmUW3M21`>jP^V);-Hq=sGuzbt`_=$?*j%DO!G)yv*`Kwxt=3ua0#ruh)#&Du9-8oe5xn}S2MtRHq-7j-%A z_zAq!s#jGyWt59&VAt0)P&PCci0V>ma0qlNc z-HFguj=@E--A9XQ;Pi-9@=wpd8cbjfwHR~b{lU_s=S8qlKW-Z^+n9DTSj-BTZ)F>` z$})0J+Q6bxfnR23iwiU}1k-BWa6UNcz|*>0d&eNEId^nK3+@h#p+8J> zFXD)Vr{SBfDE#AKpG$gm&~Y8tuMW_{NsxYk+$IdI#bIxqC_Tv!UYLkJ$)|Codgks| zhkvW`3#0csAT&eg!y6{0@fUr^nC#OSLGEMPRO2p1X)Q5KIuhJ)a(N6=sLaui?tfd7 zz9>)*6Z5EPfxc%9WQA7wD|e`#*t0Y>M|Pj0TKLn6W@?!7RbJW>zWY90Vx9jPCrp%C zQAaeiL~b*eiBc|3ZX$8Lh2a`|wYM&&jpSM_4JU(+hyLxX_`36&#vj&R zTBWnDhKr3eF>@Izh2@G=J9(u_l`&L09=0&Js&!#6l&iyEBYXq~#Z&QJf-IHb?2n%P zQ?Am&m_gC6GK%JlN-(Wvuarx!@5BFu!O)FwrM!~e+&V=480}HC6|zZbMYP9AmKUAT0IkKn9%Pe!+4nRiC7AlBoDO_GtiArFs6`KERD46{ z3PaCPDcUn_JGLTc2%=g^AD``I1FH!TLTwcIb4Fxkr;!|k&73xDW)Q-p6`%88)C?(P zbbMebZ?;up+eq|VaK}#r3MLn1lH!+N+8kYQLu!s$qcS~oOP&}PcqbnjJT=UI*!yR4 z>tecRuO%PD7L$^D8N)kv$1Ru2w{9?Y9Tn#+{}AQP#Tu1&mCbITW!7HP$z&~0JNhXE zW!SzP)Q2z?!*DUbCa^ZZSIr)@$w6E(I?c=(k2N{-G<<2A#ufFSf7j?W>Eb#i=*sC& z)zE0mB@Ur?{**P}lQt2@=weYVk@a#9G5IcRT5O^6S@Hm>ipc$RK*rejTXybKM{@U- zyFALGD+O(XFLZReh4j!r*>mnW;GSaCsR}JcgYQ&DZE|jcU8m;Oe=9Ik1dd166@|Hn zN)`!=A1s$Jz#|mPR*u)cN*l?K^CaaC%)5eP>#z9wZ=avQmlBl@tJ4-n9KeweL-;m< zguv@ELfx~Cz9uI9lzqc=gXbFBwoChw+Rm~EQrtc+pW#^^bry9Di` z`yP=@tPjxFV5brv+(sS^JTkQB=RH0B|E$$*e7(>2pY_b%@gx5|pBS$Dg^u|@AFP_R z;Sk2^v5>n>!gRxVk>dZ4jDGr6o7U;Jnq=k;obHzxm=HZLakL|wz0`|;+A(p%^gjF- z<&kS@t_IX*EeY?O{mC2h8YrzphihyNP!_8@h&gf$fcRs}V zYNouVoEd|+e5V6ywUph1L zXicwiCHQg*stel75u*6H&Xno6W?0%=DI0WrHN+|-9j&+SCAv=BO>>v`&+R`Q3Y>5ar|o7$J~k# z5Iv7>`$(eXtm9h|+MnKS=JMFMA^_A!Gj9`MnZOa)Z!mnVJs$&1+*B<;<2J!i3t=wXMxQlv*EB_Bw+bjn35Q zp4l!cx9OkqO4g9xX9lfe4IVt|{cnQOy`kg9DZ&pvgWSnjd7EiJ3*Gn)iV{3>$BS}N zugN#)AJv;_9ieS3;ge#;jd7MD6m5?+U@eq~|6T86ura!5#gJU4>5@_%5EZnIk{qVF zh=%YOcd(}yWB51X5oYySV4V&>YFg{mcG7wS3K z7&P5vSte}soup(JeuNoBVJ`?t@J*v}*!S@GWw`$Tjb6u5E?!0BWQz=|Aiqb!X>7@f zqu31!@3=(Y<*cc;6zMu|aP26auPNQd6lQLigv;r@jh8;jY7yKpdHY9e3&V2oh18

    PgJYFjlS*?lrk)ZwJImp8DklV%-WF-~Q&{4+{tbg}Z!yr#Hmkv>M=g))-PICW`< zSo|l2vwA465ZnqMrBnLajuTvDoVxtcuZ?~*lGF=fcDBrUZQm5W@joP^Pt++Uefv*E zsH-Mm{6C3bwy|uh$tbCgm`RphRz5vcOcJ@|)C^1#m{YP4ua4$FVwU|?{*R2Bdxx4j zrr12f>^21@#WW%>ZipZ9a*;ZZY&IK==S~&~U}CvR%oCIg$@&jq-7sMqy9)C&jX`^=zWL#SuVKwry{gt z7%7e==vOhitmx$p?+JIWWr}(|Zx>?nGx05 zrk2L-Ytu;2cU%*dWud6Zp_ZM>{4FZCamuTQOq(6mAOv&(j%9+6;nfo;e? z{WC_YOp!1_lvv|NQZX0R>(2r?crNOrq!IwhG=*I!rZ=g=5vtf2wsqdpEVG}HbeR+D ze#|~%NGgVM+S8eD(4F)@HL+?gmpOZnAm_jDJWPtNN81v*ekB2El#Tg2J(%wL4Wh>jowBi2qP#&v5<YUg_Dk>u2cDtWuG$TgnX1(X3kD7TOmXw zJ|&xflh8p_KH1C=q?uVn9HX56@R-sxdn~I#DQN@8A(aw!9sJ(=>O}}8h&a?-IbF6D zFm$(I_$9yP4~Z8S}DdX&38_M`>rwGL6S4loMB~C@cuyD)9+xZEO<9 zdk1>K3>6xsNTnmS-7uB%cBShBL-w;L_OlGyEL_?*&XK<&vnQLA|0=eZ1C7+K9PvGugw} zOi#o;Jqp3aS3?i{u;Ci@%2=8MHC|Btww)WCT*)V>-hE{Tr9^)LH{50FT;o;fC zS}a|p9R26X+VR6!Ec~fm+Q3$sEvqKG6xWE&0`EHx1XZt{Y$dZ@ zW}pD+$&m2$QJ^dtfRuY=OF}RNz0I0OAViHLs)M)l#wd_R^ZqRax#8##KKoj(`pq2g zfVkLW?jDHqr1e+Gnhw-s(P!6D&9u)pV8N^#MJk%a|9}_M@i*NrEhg@oBb`-Hu+^7t z>_`pEq@Fe-z^_9QFrd-c<^Zih*&48MLbw+KTKglBhCu|EXxuOJ$(&LC_>B17)OjTbl&5dCkvkEYu0D& zS*=ms|Mp~AqltQ1D;(QVc+W=CUcEl=-D~{p!ll8xaMiBB8|pojNBDXx;Wv5uQA2)* z;^*XScfa4P zu9ZX7TU4|3o7nNCvweDSrn!#Kc9W;%YJDq3!h=WVYT|UYyKR4Yt(k^w)8L!Js+i(^- z%WTCuEt|BTOm)_;7XwXaWZnZ^bY+!c$B6kY7(Fc1xbdgTR{yf&PnvaFYR7us*IhiD z_Wz+BR_apKlx-A+q2O;E`SyKM=Ej{Y-?d-EPPOP(YrF?OThy^*u_#`2V*4i+xTJZY zbx5y?(`{Z$7V&x5aSC62`ByEvP0+3hhrQFpm*+BMvw|NJv2l>TBIo0>D>v$G|0x(` zTrz#CJiy9yzDdo&ev$Z>ik+%_$o;Firm3?cxwpaGgPq#WW&?Zni3B%6y%44d@ z3^x3mY;{I+aNJ}{C*QVYC3jcC@+8lLHLnlM!bLEGnq=GeS9K)2JU;(TZm8?^_v`6v zz>N%D#^u)@K3#}j^jrBjf0u$`6uAV(2S2__r{&5Fu$ajf^*r?S>GOIqmeWz#;a$aj=)BTVMlOM6xV2Sl zzRYwyFW+~Q@2hTxN}1O`176yXnwIQ^mw>fQL~)(!1{+_tbc8pZ`USG?FveJ^j|G$& zhZgHp+3M{{sp_&d13R`MyM0pJRIcp~YCcn(?wJI^eaB->7q3z0g-M3yX^Nhm`AqhL zV)m;w3wFE*@2OGiX1*|7W~}YuVenN`v5OIxZ=;Rw_wWp;)hJjmB$x3zURxMdtF|#kos4r zwYdV=A~ELR>+J51Yjn^-V?K2@mCb(i(~_w8`zA`3^x+MM*$x+_f1({2PneW^+#OVl zLDSv~ozT8T2%)hOkwS^YTZ)Hi#iwdUxRWUhJbEmW+uuKiGZxXz(SPi?G7^yv+^2`I 
zQ=J-^_3;?dZ;7^lvrnY^zjLcTTG2mC@xPtFOT}2HD<4xkO}A!!#c+*Q?b|Nk-_h`+ z@t)sxDnCQJYi{SLxbDo#ZoV=Si!73+NBJ7Om&}0pJtf@IS0z&VlGD^qH`V7? zD;coxD$y2G$+8%{m20Xlow<}bIabh@eir&nuhq!*VwO+J{iBMqkj}F0Sz|(@_S25^ zMBQ;FMZMe6hg8|QNZ&I+0l#bO{J*dI_uSmKt!bBL{_d~>u|#_AZMOy-)z9L~#Y6;n zOR@I33CUoGMZKxw0n7aPuRCdY-KqC|A6azl#!gIX(hOt$oXKO1#Rsj4YzHrv<*$!H zu)$PWpE1fLZ1%*jwA~gVb{9;U4;g>j@~S!f7>rBrw%0yc`u2M#ripjRKs+PFNw%V5 z(YLLZbc7P;O}cl-$=GswO|R8VOz%*d1Ptu8-h|&;UR?UYOf-qFfRz~WM+s-IPti!H zqiJ3XuH74bCfvt04b7~?5oaCt&ZZppdEk;(mCDuI8_`I;2U&>a*&3->V^&oa8Qgjw z=F?~~X`)RyY_3mA7QJq5?C+aPyBynY@2|;rM1I{4-#1p@ujX&G6#nk_$!Fi!Z+U!Q zj!&tXuiO2SpX76@ulM)ErK|k(E4t*{c`QEv=OKPY-27Gle988={q61R=6tC9#0x&Z z*Zo;&>~wBUxA*hu;^6bg*Kn-g^Em#N_k9`G6g3Fn!Szd*dVfw1&ik7lAK&-W@O5qu z-{+k#@P75mmdot3w z*;rp_PR3$4qjf`$d<#tIEKQtv8gR~(us>aS>?Z3RNJu7y=uX)S{As5~1%^3ZkoE_$ zn3eY5@}P)-F!zIKJ&0x0CzkyV;{k|e>_$X%ehDNPy3#W*T=RY%bq)`+$2vYImZ#;x z5|JEAF2i2E;1P{ROFj9s0?$(HAl;ug0Ln}@)nI8TiE;74GXvF{0!lUmZmNTLlzu-B-TDX5P@U=}@BH;>x_ql$ zYY?&(Ny*b#8envvI(`Upe?K!pnKFM__(fs899ZcCs&9$TZh8J)$p_j~kI7>(R8+xi z&hqJ!W+eUhVlJ$WbtFXJNpYw#}-C%RhP9Xj3?TR^uyTG8o4_hI`?qbK&XXSc< zrUTgIhT@Q2Eo^Dx-h%l8>$TFUkLmw&Jy8)8G$>OK|yB$poA{3pZ zm2Qv76`Td1#1xpU%CaYrX6LViL-y?sx39;@<8SxdL3H|@Z&=(}lR_qylnp|;lpihO z*Vpj(=cDUM_mAH>qTbhNbgIcmx$oE4V)OQAl83rwFs1I-Yq^@9@8cc4@5>jy9#_LN zSWe0=3KmMZ+z6@1gpr$o5lkk3dPJtA43P{iG>*S*=vjOO&CVzmGkTT0H3}bEoQZbN zd%4-~<$8$i&?sj)wx%11=+tZ7Y6HYBKS z9ugR0#2kr=q>tkLj%hhR2}1^soZcC=hS~m5cwEi72XWF9hkke=?Q%E+iY1kQqE${4 ziZF5=j)C(ZJ;BUA??7f&tf< z3L`Vi$&q-LN_^;Q)_X>{jGQD38QXV13$WBTcYP4pX4I7tg5E{De-TqeSwg-P3_24< z6DstiQmsKv@tAC%$^hAB3%<%dBfjai?BebNGqNOgw0$2?EcsQAhBD<4+on6gNju@q z8l4Q)^ANw3PU!GCW47>&KoKkE7pz}xj!!=7PB=7cnx8`|+{HkaTGc(C>;cV>Rm=a( zqW%&?vc74*Q#w8lwXbnrFV4S5jgBD$RSxRNyRs~I#+KyL!8WqrG>G-Q%m z3pYOCs&VoyJrL^L@v%7QId4iGH54>0yLg@pYjSddrR0lT&;zsg{WQTmG#@M%j>UlL-a;m!5ghI_@#HO~qtsdjnC*v+QQp!r{R?T>*QkN+0A z2a0Q0{Ou5?#?98f-ry33k>rroXt*y;p*Pa!n0V2Knw9wUj#Fh!!^!Sj>7?qFztU)s zvh|!hXH+EjU-&nl2%X=d2YP7*V#YH4-(9f``m;a!*VG=nvAVlhxMjgH;1uI=A1Vb-j_; 
zAkexuF;vbP+w?5hJsPE|Xa9j0i0vT0?;Z=ti#p7Vba`R)TOes2Z}X?!X4?_$P5Liq zW5$+mcfBfgtvnhq&YQ-Wt%1k?rWH)l;yRS@95-iZQ}S}h}Y4I&E!D!(X;pl-Qxu%Axw-)P4wP&o*Dk4yvUiH*{T?-8z4(_h%gjNxc=$ayUqQR&gLQSg~I-)7^LOQ+J*2Xu(Gk&{R7=k@r^(Wjcc$@7! z2G`As+T|$be88@dPUZF@GnSeksIZF1eBss(p-22Ai-b*o9{pA&^3SNKK*o zH%)3xbcDV5nneADu&e^JmTXAr>8AE}Gog{JH?2rD0v?|;mo9`44y8-Ytr3Qdks1@O zIDeAbIH~BGlQpC*5)~sHio&Qu-as=?5|2ZBOrXT8b+Ew=?zV)Gxi1Lobs_IW`qBJQ}iTElydos`$Ip@Je7{OFt(n5a*qiD_v~(8m54NQ;Y?dC?Q`6^s%ew`*kekc*Ngivt$RY z2_ZV|m|2qcSAA7V>@s?L0F`)ECET99&t!sR)rxYqE>NjS^6VY~k`b<39qLlZLoxV9W zl=av%Dhs%;$a|@}Y?zo;?gU#;2TqhkJUX-KLBfX?nCjvgi{#4~Y66s($pq%G07AO$ z%2dtBP_2?`*=l5T7}?_WWrOqhH`vmCkdNIim2Ey9no z2QNkDtRDj)#pS_$3EULjSkT`^v8wl=g7NOu&=It@79Faup-lH%ai}xG@fhafQ~43{ z3m-B;O8{_(r01yL^R};K?UL2=<^51*fxXzD>q6E)5zKU<<=nq?@T=@}%nm)n4#*Y? zGw2PFh`Gs>KuM1OnRuo?u+GaCgc3iz;i@!^&*8Zu@`mI>G69p$HKR;0|D~R?y#6_r zIA7i{e(GF2&Y@;ogsPw$F!KtGwwu)oPa??R4U|YyYh)&64=`vibWMZ699xHIC8sy` zd#a*HQg}720CHO&>f{``+}Xl04sT^IAbz_vio!W?VGu(Az+ws;Qkk*TuKv<37PKWy zCOtBVYju@nNy-HTWw6e&#Py8b1qQk8p-R;H&?!*hZZ(bY!q*J|snJ6$0apRyGVm!KOY?ElwqI^h^HkaFJ?h0-FUqcn)@7aTl1THzS(G`xetS-@L#Pu5`#&u!N? 
z2F|EsD$MvUkV+w;$_xie20L^w5K`aT_`>ScL zm&8y>L;ZrFozj9~Sb>LLF7)U^I?JH!RYxBdw|N#my>s`BM=IBw-Dw#|cuf-ymBpLh zp8!Dbhxzz(wWpEnXsHy3tsR}uQJUhTshOUAmm7J74tD^60!-lckZH{M@PzZ96?r9P zy5>#gd{;4zfL9mr25$JJ4pZC-C|BtMNQdh;9IN^egE4Soyyy z^6Ws{>S8b+8E;)O@n&ceImk>U5kwA9TH z!7GIOF7f=;6IRtaJ!)3Sdjon@8&G?SQv!p#tsog^G1s~LeUpu%-g%Ct%-8_Vi1r)z z#swMX!3zE^pehB#-ISUt1C#E~T347Q6YKAw{$zdydXmcUhF6lVm0`#j5lGj^LB~c^ zYB>o2GnS-vrT)_)1o1c3y9J!H8r~a<^E{CtW4-4EbAk#$a4#d7QX z(3|p9NYpes)Lm>#4axyC}n%$s$a^fq$b@S(vH~`fPMM?M@B+kEIZ86`p z_XcNP`z`iQP+OLcVX+4sKF7>kbPJOhcN4JU?YRmA73Id+dz|To@bHwSObL-;T|uaL zuDl*@S8^;l^}fE(Hw@UPI-g&9Cn$XR7Z6ilT>A&~0A)<;XNw0r zBJ?RpSl8@Le_yU7*@X#FkX)QSked1x@lAgpD=ZJQrTJ(%UwwG)e!=OB37LX-hLdgH z3})uQO=BKHnPoU)PAj-Tb~{6kJZN=d+;XR&x*`S%d-r@4O|D;SyErYUzW-05B$Lf% zOd1ggXqN*B2=)I6CFTYI6JsSOfQ7Bue`S+GYz^D}kwi+{q~uPeiA_s%WL24t-hIulJ{Hc`OplO7rqr8vX%6L10qGpKX|{{lT5ycWMN)NDElPS%(D+DJaY-kQZ_8o+9SA&4TpQX*oR0MJ;#;gGyGEs?tF1k28~?1nPu+ zJa=Ao#B2yKKrE~2<}*k1D@`+qc1*?A85!Zu2{l zLAp3LC|4+lGKiPw@GmWu1gIy~i?@Mr^QoQu(W)Ct?Wy|IKNvmgF`~WUofFE_m@3z+ zJ{qa1g5j{1Q`8_qAt-p}Ujy|dWln?x;Ox}IqN}I@iN=%BQrS2{u@7AK#Zu$EIAB&L z&PyH~6j)KJ;JkAn@iGN8Iq|kwmG}v#Vi8eJ`Jzm|WQf#Zi;?DUaWqDCt}V-jFa(Vg z9yMcldk?l~Ma-zkT|O2K-|D)Ldv0+w_~YZ72Y9HNnJfoMuyc=Cx)~UqNE^x+CD~1P z^AKFL1y+e}A%QLcy8DrKsa|R|X%6wV`{7WENte9RMXYm@4$H?tzbLx}7Y!i>$*X;q zSgYlO&7H(6zZWBU3j^_IxN7eqGU4lXF5^`5gc5g-4jGz8#grI)@cUk<76!Qj7zy&@ zeU2}&q0tLIvTo8Vh=8}OihZb49BI7-N}*|KI0X@X?j{>nFtid$Vn`>FzXn=;U_FU$ z@V)mI9D+oy{pa?-^nQI)Vd%)O8O`CpVd>*XzYAt=0ER7EO#1wMsFAmBz&^nt9(}QH zXN;B!K912u?i3qVJ0>j?qxS6_U=Gz}Z1dfj)-kK1hU-$=2#$B=cM=EW>&%=#s4d=Q zy|(uc*?eHP!ACmvDYF_F@G~FTItlt?fGy}5=tkuAK&u5yZ5*LA-qyX2J>KThwc{9% zwL^2eLMX(qlADy~)cW(_59;+)Et;-8hfSxhIrdS{l3|Jb13vl3jUUAZ zp1@W(K4x@%d_&^e+$U76NX&eLV8--WBac-|_ghR!eF@l#1h4H>hQ|k~ff!`tt}Kx1 zqP!tR6lac*mP%bwi)crHcUs8I;cY}a)6g4UPsjR?_Yvd1?&nm$(w3Vzb(&y;YA^hq z_7CYY4{H>zbL0aMNmkK2UcJ8&8)lT%x~DSTJ3A7EvLAHOCOM@;&KQ`LX$0{T(Wgap zc$pPxAy0gg1|4xH!W$jaCh|-+RHlvXpem-26lg!`Y6`~QfGifV=cxm0dX8;jIr1=_ 
zgSyI^X~GCX#~)By)+0rK??Vdsfb78fD&*i3Sn*10RNxC(WOEWJP;9zRxq~R{4s=t< zgA>kSa=OU56R)yr3pL-jPq->VrGMyTa6l=ux%IxwE(Bq9 zOk1T*nK-!9XH+E)Idy)f4EYK+Ubr}*m#A&~2`eYxAuB(zVezG(nP|~5qu}BnvBxd4 zpRAO`I)qfM;;9r>N>mcEfSrAMKzKjqDm2`Il^WO|bL=8J?jg9yWxRDe8aQbkw1KYY~L>coVwv0g9 zC{XoIc(!Ry8nLu?g{Lw{K|(+~WXO!4^qbeN?pa@ytXurbb4OhCPBIBJhRHqCUA1|Q zFlg|Kv3zocLNZ7#eD08m8`KuMtQoDbphA?-S*Ac9abSSyW+11pJ`6N!pc3ChAC%C6 z_P2lEx9YymN3?gBt)L6z^kPwP#u2r@EVwO{wW&8q<74B{NJ$-K(KnO{fPTWW%}`j? ze@4Ec)oPr2gc0EedW27bLGcPICzcEzMOMJ2WD~o&HBtjfeSISLP($R-gTGt!YM&Rb zSE-L6ypo8fuua6Pj8Ynj<^-$0Ba*RIS^Nu^a`Mkmwj?IMC#iglE5#{!5o4oR6VWNM zm4E@+tzvgSzJ=eQ81;AsZDSF{)I}lGxIGc!Jj3JKO4!Lg(;!%&8kRznOF33G)29|Pn~w;wBkmOg#^wZT$MRl5HT>4k zP2V{7YbOQp%)ymq`=6mubu%x8U9pT^E~UE4$3~MeKs?&1pd6Q1v+Y`8BInK8PAuQUOGS0N?$v^6=qgNM{bh6g{23M#gZPT zM1F+xg|F=Tst#F+=9b|}IQ(O@iDn)btMaM$G*BJtR)U(4ro?QjM$In{ZnJ`e{cu31 zck$mvMG>i8IM)k4Tpzw+rCR%o3Q=Fs2f^#LIF5(V1S!vfba5CDXW$UX=OyPHg)KI| zRN}IS^H=6eaJ6ncQ3_IvlSS&U9r+w?q1^&1F*mNR2dII1hD{q*QCoq-7)`XM_HtSt zag~1q6U^HH6@CF4ZzGH4GQi=zs7ZfEjGd;MGnjck zmrMf$O3*QDGPl< zi?e0r$kNbv+T_#!pxlH_H;^&x1IE=^}Xj*zaaz5D0QL{rf1gE#B zqpY6s>PDBSF?L56n!19l3g+5;CVXr_T8-7S$> zNmxG5*s|jkv6^5ash9g@SGoN;f(Ai_KW@<2I>Z`|mYE24<;1lrcGXWG7)Yk1tp<5r z*ssOEzf();#OKWjvP9_Va!%fGS?e~D+A=BSQ5?Z<2&ba!xPOe zy;}`9QDiOQ#PU>$*|O48`_OZ`gAF9zEbv?Jq~7t)RWf{;)btO_j1=yWi=9Ws?*{f+ zo-+#B2hrd(F5yI%!AC0aPWz>*T^UAS4&y&?t~lI>pA%eQKBFOU*)<=k3yHy!&OFx( zGwX`IXzASuRa$}dKR<*jwZM9{Kqctm@;0rReV^*1t}e@Jzw&ccpNc@GTZNs9@zzvU z4hKG;-Rdb&NLG$o*RCDqHJ{$Vxu;0pBHGJm%Y*!eR1i4}vq9Z#S$!>-^$474oDViIQ58yZuNg+`F@qcby zg;Y9t_B7m-C^!;mSrG#}Lv^f#4ENr6GMf1Eb4}9(S`ov%IGd}&jopeHQB3GJC=KN& zf!&aCjF6{-m0?5}uDcW8FJ2s5tXG7jzAKdXKdP7|t7@tO3BFmE zznL-o?#_RC{eN%1i&0N!+CYJTR&arU zi2t|wcC`aoIhvc8I62bm0ZgnN>HnMguNMH_fASL=y?}By)J@mo?;V5n_}R_`0jE9V z`3||?n+-n9n|~@W$OvwM*xf(h#t1R&=ZqDY)1rsjnrdRFT8}r%%QH)e^gb)gxR^cm zdow(!i%{yvAAI(`H+=}_v=(J6Tpes1*4(yLE;qHmU-}oXsbaC;i?}AoJWSMn?m&>v zEdE%1J||HT)vlH;ZRiBpl@Zxqj9Xr(I@}>$C&wm1_^@jCx8qf|wyoQAoY|UPcYo@x 
zar<^OwRTVcdW@7r2%ek7ZoW}!|C4O?)=P(_=DFA~zVyUCyx7k*A3e4lPWdsgQsNRl zJS_gL@Gga4{cd975?Wa54F-pCpUW?wlQe5=cZXYhEh&3Z@=Im8m`M9KNjNPA81$by z`EQL9*?3p>@5zgc?>28s<)ZzK`)vrI6&a|N?>9P-Mk$2kyTlc~2MGmCun+L?0p$o| zLf|+nm6oQc~Yp>ZcBQx}kOJt!CBd z;S*va2dCfvUrO zydTXxf4=1CaTiLD5r&bTt7kyn1L)}`>S6gxHySy=_J{lsK$0C% zbExHHu>W>2vZa|>*2r)GVMZvHMF8d3GVM%c4u0PpwQ3$)6sEQFO|S|AGAkIt98_s5 z=j&awbK0MA?bAD+6!)DEM3&te(^)}Wis7E5FfZL^P0Bdb-ZFo?VGz0aADfK9n73Cw z;2s|gxTiEo>_-XF4Y4K!?;iy_-$RNKEkAkgp5hCA;n!oVry#BZ`F--hZ`}Hsgir{0 zfaIfqsSCmt99Z=`At-Vz6ZFkeVeo>VyR=~OA*%7)EyXxSA8r8k00mwx6cq(te%%sR zJMp`}C|A>z?+Y3}hFV^{_H~3uH~Rl8&q!xzVSAyWWlpSu@wI;DA*zO~K`()f zjGTO-X+;}poglfkK*DTF)?NkWkdakvRKje)R9eYS6-E&j7WT3ujPLbe5Z^~vcM{kI z6*-b$gUI(vkWbI|^YZPJJpIUtT~f(-GoRUj?3nxe@Z;OHDiL+`FOF=Sm}EwN-~&?h zJTFV+n!{M4tn4q;8JrwKOPHkO1Q6VO$nfg;@u4Sg>EmCg^5SCR=0QFkYu$Ar*XO6u zJBmERes^RpL)Q9A9{6P4`l_K!OECP-g zpGp)w!0IHW@{)QJi!3BouM8}53(57<@$I>+Ut#Sdc|xOhu`cV-<>$=R>!DeoWR}67^zJT9({ROIRQnlkTkC|7;c$$yBq-M%HyZ zO_sqiPe1A;!Mn*e@;hl}l5klJ((XP<)qy zp?&JFx=7xaSDNew4+22P^@4i5;j;usg<%MAu-Gy!^t-7prRBUCWnK+V=#|KO3eppj zrOEG;0B0GvYlY~eHP}Q9*~egq%qhu~@pv5Z%uEjnDpE%@fA%tDNK_q^1Q&3$5)|n& z#K^M8kC?CsCz7V!Sxt)TL;Snooe}+MGUQ}g)}vaj3eD=3(~)zatl0zOYgFr(QA^Nv zE1{O{!o~)YG%wfg`)tZsHQKS>dNas=U!-7{5y9olXtKkOCa5sR>r`W7W>JVZel*7p$8N{`+iHzDQS|b?outpr_IeE+-qL$JJvi=W@zoRk#82B# z6>O>!nSRoStgVQ>0p3xw7k{x$(o?DlxxI?0a8{iB2rO>;t&~bfN)d~KQ7qAV%2aNN|9obhL@Z7sR&{gFH<5nNY`426{ z$TQ}G9Yb%`=GyAV#1ehu4Z|@<51gxdQ{EF=eKsav+Y#8>si)!NtTp9^Q;XA8yuO7z zwCr-r+#`4 zo*93x)KlS0YzzN#if|+bvZFA&t?wng{6%w97Vr(`BiM`RSMIF;lg?DpA z8^OQx^G$cmJD0tuX0Iy{epe&yFgmIFRxi#q>P;&GAIK}t{`%V6eh=#os5;P*!xj8Z ztp$9ed_wFrlN3Z(Cdd8VY3e@-6HEQ(Cx6SjD-2GG;D!~Yv16UsGw;I;9u4|2M+Y@} z*lWHEp)6=G&G<{yn~FTzOc|bBP@=5gA+t)vI8%ndw{S%|!=PL&2SN0ryJipKL<2Q% z5(C5$xBiU$m6GN*W{XP6SY~)zug5Z;ojet)gk|2#czUt#4WxaC%`}Sr!ctY5v{ui& zT)i}2oUCa!y{1{e_9K+57(ZD=;cO8n>k}zH){01;u)DKc)Cwu>3TO=VWmCuWcKOY@Mp>Le}&dxO@HgZZ+$%1VNUqc zNAA8}n`|C#3*~|XDnfaWpY&VD&h;nn)hMsbbf4W_)8eTu8qja~6vR8D9cF~A-S}5C 
zqC4ywhgTRWk2kF}OlyL8C&VzXtpTp*B_?syYLS2f(taY?-W)ZYYC1;5?+T@OzU zFfJ)_cJr4HPwWrXG3l`QhFdUn`Ues~BiR~o?fIj8EY86U^3eDoX-AwZ_zO5Wh1}Uk z`%*A*vs~4-MPzU?F#y_6bsKzG$4K`dOEUl86zBZ9wYtQld`6?plU%h~6YOqV3O~B8 zFlY9U8VMyWyPqYwCJ)+rg!d;pwEIt}e)vf6zo=bzJ-z54 z<88vbuhKSazT$u1bBn%B-lSjqW!(hZh^dQzK5`fO?mYYP4*6}L1qm=f%9KyG|p)BK7T;5 z<-q&=$v=#(_x+pC^WnXEO{(?$7q#i5|Ed4J@8?zdh4*sxXG)(*>OFaVeF_AxOq2tcas1{c>A6Rd&Z|Z-xE4TfE;* z`Z`_a#Bzge*Sj2da5QjSeX%W^o$YIJWhwI!znHBx7rYF9h>2Z$`PB8&hlQF}_p~47 z1x#}^_kAq8blc08CuIxW7sfnsmOiPz-IP7cUAZFLT2amNLaM-;6(!;hvrLW{>{x&3 zHiyUiQw&Cb)|K?xw0-;}F*A2^>%`S>^?y(K{%WIof39sAc1LQ$v5~$5(4wYQ%SY{`kJLvUR1)mZP2?X1_DuZGJ6d>l^e7 z*o(nj+T5{zTDli7w2FbDCCI?Rn3A6yP?TR#te>1;lnM$ig+(lk8Vf)HIo?h&1ODp2UC2E$P;y(@a|>t~1VaA+(IDU1r^Dk@>=o_6$W=rNPTsg3EunFf9h zyf&VZhf4asoy%<6m|8vIirc+~dv>htS#nPPEMN91yO(?gQpbyR4hH(k9_2N-!YugJ z;~d+Pe@3a?4|tq8g{Ew_=oI?%x-?EL%eNnR(?%3#d z`;O06e~-xjJwG|48Jtva8t^YKdKr7|-QqZZHa)%GIK$M zKO%hijP>IO4SFUYc^{Od+r3)C7;-4`j7*-ynn61zJHJZ9R2z$EwoPKXQH^VYP|jCfAI2?kx3W05CgV3 zq{J+0Srsq=w*Whlpv@tuAlf+}*kjBqi4UnPNG*;9rRV@}R0Fn#8Yq7O>hb{=!Q3di zHv_GN3^9QRme4iw)EJ!G15{$l$-n^0TnKOjSTh1QseneRlJfJjvDrF_eaagaptJ3P z21p@v0$B&Sz?vZzBO8I$5m%+>+Q{8xVE9_a${>Ma4l8g4Ez}Xv;WETP8M;@{ci12d zihKq(1%1B_x&i1bcM%3Sy#pJ7SjUU56@7gTLhJQ!5UuE|ZO~0XA0$MW5W>cY76w>{ z4ABilA9_F-=+6Z<5N+TA>p%p$q38`GgrRIgU_()wN$9#!8wv=$3=Ag1j0_mf26XMH i#XPe1t)fWUp@n{cH!Cm&gVtd4GNb?#Xo&=<{Q&?%DLfYd literal 0 HcmV?d00001 diff --git a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json index 44b5b5707042df..59eb81d2462213 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json +++ b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json @@ -1,18 +1,18 @@ [ { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "bert-base-cased", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + 
"PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 312.06, - "fp16": 345.49, - "fp32": "", + "int8": 245.16, + "fp16": "", + "fp32": 159.59, "bf16": "" } ], @@ -23,7 +23,7 @@ "Precisions": [ { "int4": "", - "int8": 4.83, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -35,19 +35,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 328.55, - "fp16": 285.3, - "fp32": "", + "int8": 2.49, + "fp16": "", + "fp32": 1.18, "bf16": "" } ], @@ -70,19 +70,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "gemma-2-9b", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "mobilenet-v2", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 20.07, - "int8": 17.42, + "int4": "", + "int8": 4444.97, "fp16": "", - "fp32": "", + "fp32": 2395.07, "bf16": "" } ], @@ -92,8 +92,8 @@ "latency": { "Precisions": [ { - "int4": 49.81, - "int8": 57.4, + "int4": "", + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -105,19 +105,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "glm-4-9b-chat", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "resnet-50", "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Accelerator Platforms", + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 36.48, - "int8": 27.59, + "int4": "", + 
"int8": 1071.42, "fp16": "", - "fp32": "", + "fp32": 472.1, "bf16": "" } ], @@ -127,8 +127,8 @@ "latency": { "Precisions": [ { - "int4": 27.41, - "int8": 36.24, + "int4": "", + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -140,19 +140,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "llama-2-7b-chat", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "ssd-resnet34-1200", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 42.82, - "int8": 33.97, - "fp16": 22.23, - "fp32": "", + "int4": "", + "int8": "", + "fp16": "", + "fp32": 9.59, "bf16": "" } ], @@ -162,9 +162,9 @@ "latency": { "Precisions": [ { - "int4": 23.35, - "int8": 29.43, - "fp16": 44.97, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -175,19 +175,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "llama-3-8b", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Accelerator Platforms", + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 39.6, - "int8": 30.59, + "int4": "", + "int8": 1104.79, "fp16": "", - "fp32": "", + "fp32": 622.5, "bf16": "" } ], @@ -197,8 +197,8 @@ "latency": { "Precisions": [ { - "int4": 25.25, - "int8": 32.69, + "int4": "", + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -210,19 +210,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "llama-3.2-3b-instruct", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "yolo11", "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { 
- "int4": 55.37, - "int8": 51.62, - "fp16": 35.82, - "fp32": "", + "int4": "", + "int8": "", + "fp16": "", + "fp32": 275.86, "bf16": "" } ], @@ -232,9 +232,9 @@ "latency": { "Precisions": [ { - "int4": 18.06, - "int8": 19.37, - "fp16": 27.91, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -245,19 +245,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", + "Model": "yolo_v8n", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 34.84, - "fp16": 19.43, - "fp32": "", + "int8": 376.6, + "fp16": "", + "fp32": 235.44, "bf16": "" } ], @@ -268,7 +268,7 @@ "Precisions": [ { "int4": "", - "int8": 48.51, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -280,19 +280,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "mistral-7b-v0.1", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", + "Model": "bert-base-cased", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 43.4, - "int8": 32.32, - "fp16": 20.91, - "fp32": "", + "int4": "", + "int8": 75.65, + "fp16": "", + "fp32": 30.06, "bf16": "" } ], @@ -302,9 +302,9 @@ "latency": { "Precisions": [ { - "int4": 23.04, - "int8": 30.94, - "fp16": 47.82, + "int4": "", + "int8": 24.61, + "fp16": "", "fp32": "", "bf16": "" } @@ -315,19 +315,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", + "Model": "efficientdet-d0", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { 
"throughput": { "Precisions": [ { "int4": "", - "int8": 2348.6, - "fp16": 2074.34, - "fp32": "", + "int8": 96.99, + "fp16": "", + "fp32": 66.54, "bf16": "" } ], @@ -338,7 +338,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 20.2, "fp16": "", "fp32": "", "bf16": "" @@ -350,19 +350,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "phi-3-mini-4k-instruct", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 59.06, - "int8": 47.96, - "fp16": 29.29, - "fp32": "", + "int4": "", + "int8": 1.2, + "fp16": "", + "fp32": 0.3, "bf16": "" } ], @@ -372,9 +372,9 @@ "latency": { "Precisions": [ { - "int4": 16.93, - "int8": 20.85, - "fp16": 34.14, + "int4": "", + "int8": 973.29, + "fp16": "", "fp32": "", "bf16": "" } @@ -385,19 +385,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "qwen2-7b", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", + "Model": "mobilenet-v2", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 40.48, - "int8": 32.79, - "fp16": 20.67, - "fp32": "", + "int4": "", + "int8": 1955.91, + "fp16": "", + "fp32": 810.96, "bf16": "" } ], @@ -407,9 +407,9 @@ "latency": { "Precisions": [ { - "int4": 24.7, - "int8": 30.49, - "fp16": 48.37, + "int4": "", + "int8": 1.36, + "fp16": "", "fp32": "", "bf16": "" } @@ -420,19 +420,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "resnet-50", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator 
Platforms", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1401.85, - "fp16": 1046.9, - "fp32": "", + "int8": 389.01, + "fp16": "", + "fp32": 94.24, "bf16": "" } ], @@ -443,7 +443,7 @@ "Precisions": [ { "int4": "", - "int8": 1.42, + "int8": 6.25, "fp16": "", "fp32": "", "bf16": "" @@ -455,19 +455,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "ssd-resnet34-1200", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 112.21, - "fp16": 73.01, - "fp32": "", + "int8": 6.3, + "fp16": "", + "fp32": 1.63, "bf16": "" } ], @@ -478,7 +478,7 @@ "Precisions": [ { "int4": "", - "int8": 14.86, + "int8": 199.13, "fp16": "", "fp32": "", "bf16": "" @@ -490,19 +490,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "ssd_mobilenet_v1_coco", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1308.1, - "fp16": 1201.69, - "fp32": "", + "int8": 678.1, + "fp16": "", + "fp32": 241.46, "bf16": "" } ], @@ -513,7 +513,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.67, "fp16": "", "fp32": "", "bf16": "" @@ -525,11 +525,11 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", - "Model": "stable-diffusion-v1-5", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", + "Model": "yolo11", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -537,7 +537,7 @@ "int4": "", 
"int8": "", "fp16": "", - "fp32": "", + "fp32": 77.9, "bf16": "" } ], @@ -560,19 +560,19 @@ } }, { - "Platform": "Intel® Arc™ A-Series Graphics dGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "yolo_v8n", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 517.1, - "fp16": 550.33, - "fp32": "", + "int8": 166.06, + "fp16": "", + "fp32": 64.29, "bf16": "" } ], @@ -583,7 +583,7 @@ "Precisions": [ { "int4": "", - "int8": 3.21, + "int8": 12.57, "fp16": "", "fp32": "", "bf16": "" @@ -595,19 +595,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU+iGPU", - "Model": "efficientdet-d0", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", + "Model": "bert-base-cased", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 23.3, - "fp16": "", - "fp32": 23.72, + "int8": 90.01, + "fp16": 73.98, + "fp32": "", "bf16": "" } ], @@ -618,7 +618,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 11.94, "fp16": "", "fp32": "", "bf16": "" @@ -630,19 +630,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU+iGPU", - "Model": "mobilenet-v2", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 228.97, - "fp16": "", - "fp32": 219.37, + "int8": 40.21, + "fp16": 36.53, + "fp32": "", "bf16": "" } ], @@ -653,7 +653,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 25.76, "fp16": "", "fp32": "", "bf16": "" @@ -665,19 +665,19 @@ } }, { - 
"Platform": "Intel® Atom® X6425E CPU+iGPU", - "Model": "resnet-50", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", + "Model": "mobilenet-v2", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 59.38, - "fp16": "", - "fp32": 54.24, + "int8": 1964.23, + "fp16": 1350.52, + "fp32": "", "bf16": "" } ], @@ -688,7 +688,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.77, "fp16": "", "fp32": "", "bf16": "" @@ -700,19 +700,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", + "Model": "resnet-50", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1.26, - "fp16": "", - "fp32": 1.08, + "int8": 774.88, + "fp16": 383.87, + "fp32": "", "bf16": "" } ], @@ -723,7 +723,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.56, "fp16": "", "fp32": "", "bf16": "" @@ -735,19 +735,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU+iGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 111.92, + "int8": 699.97, "fp16": "", - "fp32": 98.44, + "fp32": "", "bf16": "" } ], @@ -758,7 +758,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.66, "fp16": "", "fp32": "", "bf16": "" @@ -770,11 +770,11 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU+iGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H 
NPU-only", "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU+iGPU", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -782,7 +782,7 @@ "int4": "", "int8": "", "fp16": "", - "fp32": 34.99, + "fp32": 138.08, "bf16": "" } ], @@ -805,19 +805,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU+iGPU", + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "yolo_v8n", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 36.35, - "fp16": "", - "fp32": 33.97, + "int8": 138.71, + "fp16": 142.27, + "fp32": "", "bf16": "" } ], @@ -828,7 +828,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 7.93, "fp16": "", "fp32": "", "bf16": "" @@ -839,20 +839,20 @@ } } }, - { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, + { + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "bert-base-cased", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 7.26, + "int8": 219.48, "fp16": "", - "fp32": 5.01, + "fp32": 159.21, "bf16": "" } ], @@ -863,7 +863,7 @@ "Precisions": [ { "int4": "", - "int8": 139.68, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -875,19 +875,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", 
"Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 134.16, + "int8": 188.23, "fp16": "", - "fp32": 80.45, + "fp32": 149.87, "bf16": "" } ], @@ -898,7 +898,7 @@ "Precisions": [ { "int4": "", - "int8": 7.8, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -910,19 +910,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "resnet-50", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 19.87, + "int8": 6.52, "fp16": "", - "fp32": 8.15, + "fp32": 3.46, "bf16": "" } ], @@ -933,7 +933,7 @@ "Precisions": [ { "int4": "", - "int8": 51.33, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -945,19 +945,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "mobilenet-v2", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.33, + "int8": 1821.48, "fp16": "", - "fp32": 0.13, + "fp32": 1146.43, "bf16": "" } ], @@ -968,7 +968,7 @@ "Precisions": [ { "int4": "", - "int8": 2995.1, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -980,19 +980,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "resnet-50", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 45.84, + "int8": 
771.39, "fp16": "", - "fp32": 21.63, + "fp32": 349.5, "bf16": "" } ], @@ -1003,7 +1003,7 @@ "Precisions": [ { "int4": "", - "int8": 22.72, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1015,19 +1015,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU-only", + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "ssd-resnet34-1200", + "featured_SKU": true, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 34.7, "fp16": "", - "fp32": 5.3, + "fp32": 17.75, "bf16": "" } ], @@ -1050,19 +1050,19 @@ } }, { - "Platform": "Intel® Atom® X6425E CPU-only", - "Model": "yolo_v8n", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 10.31, + "int8": 988.89, "fp16": "", - "fp32": 5.12, + "fp32": 601.58, "bf16": "" } ], @@ -1073,7 +1073,7 @@ "Precisions": [ { "int4": "", - "int8": 99.61, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1085,19 +1085,19 @@ } }, { - "Platform": "Intel® Atom® X6425E iGPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "yolo11", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 22.02, - "fp16": 25.05, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 293.1, "bf16": "" } ], @@ -1108,7 +1108,7 @@ "Precisions": [ { "int4": "", - "int8": 60.1, + "int8": 
"", "fp16": "", "fp32": "", "bf16": "" @@ -1120,19 +1120,19 @@ } }, { - "Platform": "Intel® Atom® X6425E iGPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", + "Model": "yolo_v8n", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 187.37, - "fp16": 222.58, - "fp32": "", + "int8": 360.74, + "fp16": "", + "fp32": 260.26, "bf16": "" } ], @@ -1143,7 +1143,7 @@ "Precisions": [ { "int4": "", - "int8": 7.71, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1155,19 +1155,19 @@ } }, { - "Platform": "Intel® Atom® X6425E iGPU-only", - "Model": "resnet-50", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "bert-base-cased", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 48.1, - "fp16": 51.68, - "fp32": "", + "int8": 73.65, + "fp16": "", + "fp32": 26.04, "bf16": "" } ], @@ -1178,7 +1178,7 @@ "Precisions": [ { "int4": "", - "int8": 22.89, + "int8": 19.73, "fp16": "", "fp32": "", "bf16": "" @@ -1190,19 +1190,19 @@ } }, { - "Platform": "Intel® Atom® X6425E iGPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1.16, - "fp16": 1.16, - "fp32": "", + "int8": 92.68, + "fp16": "", + "fp32": 60.94, "bf16": "" } ], @@ -1213,7 +1213,7 @@ "Precisions": [ { "int4": "", - "int8": 870.65, + "int8": 13.55, "fp16": "", "fp32": "", "bf16": "" 
@@ -1225,19 +1225,19 @@ } }, { - "Platform": "Intel® Atom® X6425E iGPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 93.36, - "fp16": 95.62, - "fp32": "", + "int8": 1.11, + "fp16": "", + "fp32": 0.25, "bf16": "" } ], @@ -1248,7 +1248,7 @@ "Precisions": [ { "int4": "", - "int8": 13.54, + "int8": 1258.55, "fp16": "", "fp32": "", "bf16": "" @@ -1260,19 +1260,19 @@ } }, { - "Platform": "Intel® Atom® X6425E iGPU-only", - "Model": "yolo_v8n", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 31.79, - "fp16": 33.13, - "fp32": "", + "int8": 1664.09, + "fp16": "", + "fp32": 775.16, "bf16": "" } ], @@ -1283,7 +1283,7 @@ "Precisions": [ { "int4": "", - "int8": 35.83, + "int8": 1.02, "fp16": "", "fp32": "", "bf16": "" @@ -1295,19 +1295,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "efficientdet-d0", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "resnet-50", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 39.3, + "int8": 330.37, "fp16": "", - "fp32": 28.97, + "fp32": 84.22, "bf16": "" } ], @@ -1318,7 +1318,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 4.62, "fp16": "", "fp32": "", "bf16": "" @@ -1330,19 +1330,19 @@ } }, { - 
"Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "mobilenet-v2", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "ssd-resnet34-1200", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 480.45, + "int8": 6.04, "fp16": "", - "fp32": 302.75, + "fp32": 1.48, "bf16": "" } ], @@ -1353,7 +1353,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 235.82, "fp16": "", "fp32": "", "bf16": "" @@ -1365,19 +1365,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "resnet-50", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 129.7, + "int8": 664.52, "fp16": "", - "fp32": 54.69, + "fp32": 225.17, "bf16": "" } ], @@ -1388,7 +1388,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.26, "fp16": "", "fp32": "", "bf16": "" @@ -1400,19 +1400,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": "yolo11", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.49, + "int8": "", "fp16": "", - "fp32": 0.86, + "fp32": 71.89, "bf16": "" } ], @@ -1435,19 +1435,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", + "Model": 
"yolo_v8n", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 233.16, + "int8": 139.66, "fp16": "", - "fp32": 114.81, + "fp32": 58.69, "bf16": "" } ], @@ -1458,7 +1458,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 9.49, "fp16": "", "fp32": "", "bf16": "" @@ -1470,19 +1470,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU+iGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", + "Model": "bert-base-cased", + "featured_SKU": true, + "whats_new_model": false, + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 41.37, + "int8": 260.56, + "fp16": 194.71, + "fp32": "", "bf16": "" } ], @@ -1493,7 +1493,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 4.83, "fp16": "", "fp32": "", "bf16": "" @@ -1505,19 +1505,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU+iGPU", - "Model": "yolo_v8n", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 67.73, - "fp16": "", - "fp32": 36.05, + "int8": 12.97, + "fp16": 13.56, + "fp32": "", "bf16": "" } ], @@ -1528,7 +1528,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 127.7, "fp16": "", "fp32": "", "bf16": "" @@ -1540,31 +1540,31 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", + "Model": "llama-2-7b-chat", + "featured_SKU": true, 
"whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 14.29, - "fp16": "", - "fp32": 11.18, + "int8": "", + "fp16": 5.09, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 71.84, - "fp16": "", + "int8": "", + "fp16": 196.34, "fp32": "", "bf16": "" } @@ -1575,19 +1575,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "mobilenet-v2", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 273.98, - "fp16": "", - "fp32": 169.54, + "int8": 3708.8, + "fp16": 3055.93, + "fp32": "", "bf16": "" } ], @@ -1598,7 +1598,7 @@ "Precisions": [ { "int4": "", - "int8": 4.05, + "int8": 0.39, "fp16": "", "fp32": "", "bf16": "" @@ -1610,31 +1610,31 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", - "Model": "resnet-50", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", + "Model": "phi-3-mini-4k-instruct", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 45.27, - "fp16": "", - "fp32": 18.84, + "int8": 0.5, + "fp16": 8.5, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 23.76, - "fp16": "", + "int8": 1995.15, + "fp16": 117.55, "fp32": "", "bf16": "" } @@ -1645,19 +1645,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", - "Model": 
"ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", + "Model": "resnet-50", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.76, - "fp16": "", - "fp32": 0.31, + "int8": 2072.98, + "fp16": 998.42, + "fp32": "", "bf16": "" } ], @@ -1668,7 +1668,7 @@ "Precisions": [ { "int4": "", - "int8": 1317.43, + "int8": 0.74, "fp16": "", "fp32": "", "bf16": "" @@ -1680,19 +1680,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 98.2, - "fp16": "", - "fp32": 45.36, + "int8": 231.17, + "fp16": 198.89, + "fp32": "", "bf16": "" } ], @@ -1703,7 +1703,7 @@ "Precisions": [ { "int4": "", - "int8": 10.52, + "int8": 7.55, "fp16": "", "fp32": "", "bf16": "" @@ -1715,19 +1715,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU-only", + "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", + "Model": "yolo_v8n", + "featured_SKU": true, + "whats_new_model": false, + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 13.77, + "int8": 398.63, + "fp16": 498.67, + "fp32": "", "bf16": "" } ], @@ -1738,7 +1738,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.14, "fp16": "", "fp32": "", "bf16": "" @@ -1750,19 +1750,19 @@ } }, { - "Platform": "Intel® Atom® x7425E CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ 
i5-1235U Processor CPU+iGPU", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 21.58, + "int8": 35.35, "fp16": "", - "fp32": 11.78, + "fp32": 15.52, "bf16": "" } ], @@ -1773,7 +1773,7 @@ "Precisions": [ { "int4": "", - "int8": 47.39, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1785,19 +1785,19 @@ } }, { - "Platform": "Intel® Atom® x7425E iGPU-only", + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 40.0, - "fp16": 34.31, - "fp32": "", + "int8": 47.59, + "fp16": "", + "fp32": 29.85, "bf16": "" } ], @@ -1808,7 +1808,7 @@ "Precisions": [ { "int4": "", - "int8": 34.13, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1820,19 +1820,19 @@ } }, { - "Platform": "Intel® Atom® x7425E iGPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 414.66, - "fp16": 324.8, - "fp32": "", + "int8": 0.5, + "fp16": "", + "fp32": 0.18, "bf16": "" } ], @@ -1843,7 +1843,7 @@ "Precisions": [ { "int4": "", - "int8": 3.49, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1855,19 +1855,19 @@ } }, { - "Platform": "Intel® Atom® x7425E iGPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": 
"Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 106.34, - "fp16": 64.69, - "fp32": "", + "int8": 745.03, + "fp16": "", + "fp32": 322.13, "bf16": "" } ], @@ -1878,7 +1878,7 @@ "Precisions": [ { "int4": "", - "int8": 10.56, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1890,19 +1890,19 @@ } }, { - "Platform": "Intel® Atom® x7425E iGPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.16, - "fp16": 1.32, - "fp32": "", + "int8": 165.62, + "fp16": "", + "fp32": 52.3, "bf16": "" } ], @@ -1913,7 +1913,7 @@ "Precisions": [ { "int4": "", - "int8": 472.59, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1925,22 +1925,57 @@ } }, { - "Platform": "Intel® Atom® x7425E iGPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 211.07, - "fp16": 137.13, + "int8": 3.31, + "fp16": "", + "fp32": 1.08, + "bf16": "" + } + ], + "Unit": "FPS", + "UnitDesc": "higher is better" + }, + "latency": { + "Precisions": [ + { + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], + "Unit": "ms", + "UnitDesc": "lower is better" + } + } + }, + { + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", + "Parameters": { + "throughput": { + "Precisions": [ + { + "int4": "", + "int8": 329.75, + "fp16": "", + "fp32": 113.37, + "bf16": 
"" + } + ], "Unit": "FPS", "UnitDesc": "higher is better" }, @@ -1948,7 +1983,7 @@ "Precisions": [ { "int4": "", - "int8": 6.2, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1960,19 +1995,19 @@ } }, { - "Platform": "Intel® Atom® x7425E iGPU-only", + "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 60.92, - "fp16": 44.64, - "fp32": "", + "int8": 77.32, + "fp16": "", + "fp32": 35.51, "bf16": "" } ], @@ -1983,7 +2018,7 @@ "Precisions": [ { "int4": "", - "int8": 18.51, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -1995,19 +2030,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 45.34, + "int8": 31.71, "fp16": "", - "fp32": 33.5, + "fp32": 12.08, "bf16": "" } ], @@ -2018,7 +2053,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 46.54, "fp16": "", "fp32": "", "bf16": "" @@ -2030,19 +2065,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 57.78, + "int8": 43.74, "fp16": "", - "fp32": 48.75, + "fp32": 22.34, "bf16": "" } ], @@ -2053,7 +2088,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 33.35, "fp16": "", "fp32": "", "bf16": "" @@ -2065,19 +2100,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + 
"Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.56, + "int8": 0.46, "fp16": "", - "fp32": 0.51, + "fp32": "", "bf16": "" } ], @@ -2088,7 +2123,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2376.04, "fp16": "", "fp32": "", "bf16": "" @@ -2100,19 +2135,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 525.47, + "int8": 758.81, "fp16": "", - "fp32": 392.65, + "fp32": 283.89, "bf16": "" } ], @@ -2123,7 +2158,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.99, "fp16": "", "fp32": "", "bf16": "" @@ -2135,19 +2170,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 197.41, + "int8": 151.44, "fp16": "", - "fp32": 115.71, + "fp32": 39.19, "bf16": "" } ], @@ -2158,7 +2193,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 11.4, "fp16": "", "fp32": "", "bf16": "" @@ -2170,19 +2205,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { 
"Precisions": [ { "int4": "", - "int8": 5.38, + "int8": 2.64, "fp16": "", - "fp32": 2.71, + "fp32": 0.75, "bf16": "" } ], @@ -2193,7 +2228,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 506.82, "fp16": "", "fp32": "", "bf16": "" @@ -2205,19 +2240,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 316.13, + "int8": 312.97, "fp16": "", - "fp32": 194.29, + "fp32": 94.2, "bf16": "" } ], @@ -2228,7 +2263,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 4.81, "fp16": "", "fp32": "", "bf16": "" @@ -2240,11 +2275,11 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "yolo11", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU+iGPU", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2252,7 +2287,7 @@ "int4": "", "int8": "", "fp16": "", - "fp32": 80.2, + "fp32": 31.35, "bf16": "" } ], @@ -2275,19 +2310,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 114.67, + "int8": 67.82, "fp16": "", - "fp32": 78.26, + "fp32": 27.07, "bf16": "" } ], @@ -2298,7 +2333,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 20.66, "fp16": "", "fp32": "", "bf16": "" @@ -2310,19 +2345,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U 
Processor CPU+iGPU", "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 11.77, + "int8": 49.73, "fp16": "", - "fp32": 4.32, + "fp32": 26.8, "bf16": "" } ], @@ -2333,7 +2368,7 @@ "Precisions": [ { "int4": "", - "int8": 87.73, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2345,19 +2380,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 18.94, + "int8": 73.58, "fp16": "", - "fp32": 11.49, + "fp32": 48.37, "bf16": "" } ], @@ -2368,7 +2403,7 @@ "Precisions": [ { "int4": "", - "int8": 55.76, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2380,19 +2415,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.17, + "int8": 0.69, "fp16": "", - "fp32": 0.04, + "fp32": 0.3, "bf16": "" } ], @@ -2403,7 +2438,7 @@ "Precisions": [ { "int4": "", - "int8": 5772.15, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2415,19 +2450,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 
301.05, + "int8": 1047.6, "fp16": "", - "fp32": 132.91, + "fp32": 530.76, "bf16": "" } ], @@ -2438,7 +2473,7 @@ "Precisions": [ { "int4": "", - "int8": 3.6, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2450,19 +2485,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 51.66, + "int8": 234.13, "fp16": "", - "fp32": 14.45, + "fp32": 87.8, "bf16": "" } ], @@ -2473,7 +2508,7 @@ "Precisions": [ { "int4": "", - "int8": 19.8, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2485,19 +2520,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.89, + "int8": 4.73, "fp16": "", - "fp32": 0.23, + "fp32": 1.74, "bf16": "" } ], @@ -2508,7 +2543,7 @@ "Precisions": [ { "int4": "", - "int8": 1118.71, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2520,19 +2555,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 115.03, + "int8": 466.04, "fp16": "", - "fp32": 36.99, + "fp32": 188.24, "bf16": "" } ], @@ -2543,7 +2578,7 @@ "Precisions": [ { "int4": "", - "int8": 9.06, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2555,11 +2590,11 @@ } }, { - "Platform": "Intel® 
Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "yolo11", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -2567,7 +2602,7 @@ "int4": "", "int8": "", "fp16": "", - "fp32": 11.94, + "fp32": 65.2, "bf16": "" } ], @@ -2590,19 +2625,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 25.97, + "int8": 124.43, "fp16": "", - "fp32": 9.66, + "fp32": 58.02, "bf16": "" } ], @@ -2613,7 +2648,7 @@ "Precisions": [ { "int4": "", - "int8": 40.21, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2625,19 +2660,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 43.69, - "fp16": 33.8, - "fp32": "", + "int8": 39.9, + "fp16": "", + "fp32": 15.7, "bf16": "" } ], @@ -2648,7 +2683,7 @@ "Precisions": [ { "int4": "", - "int8": 26.56, + "int8": 40.17, "fp16": "", "fp32": "", "bf16": "" @@ -2660,19 +2695,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 73.58, - "fp16": 58.53, - 
"fp32": "", + "int8": 56.09, + "fp16": "", + "fp32": 35.64, "bf16": "" } ], @@ -2683,7 +2718,7 @@ "Precisions": [ { "int4": "", - "int8": 25.45, + "int8": 28.71, "fp16": "", "fp32": "", "bf16": "" @@ -2695,19 +2730,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.48, - "fp16": 0.52, - "fp32": "", + "int8": 0.57, + "fp16": "", + "fp32": 0.15, "bf16": "" } ], @@ -2718,7 +2753,7 @@ "Precisions": [ { "int4": "", - "int8": 2110.65, + "int8": 2072.23, "fp16": "", "fp32": "", "bf16": "" @@ -2730,19 +2765,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 671.35, - "fp16": 504.8, - "fp32": "", + "int8": 949.06, + "fp16": "", + "fp32": 456.56, "bf16": "" } ], @@ -2753,7 +2788,7 @@ "Precisions": [ { "int4": "", - "int8": 2.72, + "int8": 1.75, "fp16": "", "fp32": "", "bf16": "" @@ -2765,19 +2800,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 203.17, - "fp16": 118.59, - "fp32": "", + "int8": 184.18, + "fp16": "", + "fp32": 52.94, "bf16": "" } ], @@ -2788,7 +2823,7 @@ "Precisions": [ { "int4": "", - "int8": 6.3, + "int8": 9.62, "fp16": "", "fp32": "", "bf16": 
"" @@ -2800,19 +2835,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 5.09, - "fp16": 2.78, - "fp32": "", + "int8": 3.14, + "fp16": "", + "fp32": 0.91, "bf16": "" } ], @@ -2823,7 +2858,7 @@ "Precisions": [ { "int4": "", - "int8": 210.41, + "int8": 466.62, "fp16": "", "fp32": "", "bf16": "" @@ -2835,19 +2870,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 396.07, - "fp16": 221.18, - "fp32": "", + "int8": 381.86, + "fp16": "", + "fp32": 133.9, "bf16": "" } ], @@ -2858,7 +2893,7 @@ "Precisions": [ { "int4": "", - "int8": 4.3, + "int8": 4.17, "fp16": "", "fp32": "", "bf16": "" @@ -2870,19 +2905,19 @@ } }, { - "Platform": "Intel® Celeron® 6305E iGPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i5-1335U Processor CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 121.77, - "fp16": 81.6, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 43.41, "bf16": "" } ], @@ -2893,7 +2928,7 @@ "Precisions": [ { "int4": "", - "int8": 10.34, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -2905,19 +2940,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "bert-base-cased", - "featured_SKU": true, + 
"Platform": "Intel® Core™ i5-1335U Processor CPU-only", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 243.99, + "int8": 91.26, "fp16": "", - "fp32": 157.96, + "fp32": 36.14, "bf16": "" } ], @@ -2928,7 +2963,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 18.18, "fp16": "", "fp32": "", "bf16": "" @@ -2940,19 +2975,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "efficientdet-d0", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 189.52, + "int8": 120.47, "fp16": "", - "fp32": 154.61, + "fp32": 47.12, "bf16": "" } ], @@ -2963,7 +2998,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 13.36, "fp16": "", "fp32": "", "bf16": "" @@ -2975,19 +3010,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.45, + "int8": 149.82, "fp16": "", - "fp32": 1.19, + "fp32": 92.81, "bf16": "" } ], @@ -2998,7 +3033,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 9.24, "fp16": "", "fp32": "", "bf16": "" @@ -3010,19 +3045,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": 
"mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 4485.9, + "int8": 1.61, "fp16": "", - "fp32": 2415.8, + "fp32": 0.49, "bf16": "" } ], @@ -3033,7 +3068,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 736.14, "fp16": "", "fp32": "", "bf16": "" @@ -3045,19 +3080,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1097.16, + "int8": 2964.94, "fp16": "", - "fp32": 475.61, + "fp32": 1318.69, "bf16": "" } ], @@ -3068,7 +3103,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.69, "fp16": "", "fp32": "", "bf16": "" @@ -3080,19 +3115,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 18.81, + "int8": 536.17, "fp16": "", - "fp32": 9.71, + "fp32": 148.8, "bf16": "" } ], @@ -3103,7 +3138,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.82, "fp16": "", "fp32": "", "bf16": "" @@ -3115,19 +3150,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, 
"whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1120.99, + "int8": 8.77, "fp16": "", - "fp32": 624.14, + "fp32": 2.46, "bf16": "" } ], @@ -3138,7 +3173,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 133.51, "fp16": "", "fp32": "", "bf16": "" @@ -3150,19 +3185,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 374.74, + "int8": 1076.91, "fp16": "", - "fp32": 236.96, + "fp32": 378.11, "bf16": "" } ], @@ -3173,7 +3208,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.35, "fp16": "", "fp32": "", "bf16": "" @@ -3185,19 +3220,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "bert-base-cased", - "featured_SKU": true, - "whats_new_model": false, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "yolo11", + "featured_SKU": false, + "whats_new_model": true, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 76.15, + "int8": "", "fp16": "", - "fp32": 30.19, + "fp32": 123.4, "bf16": "" } ], @@ -3208,7 +3243,7 @@ "Precisions": [ { "int4": "", - "int8": 25.21, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3220,9 +3255,9 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": true, + "Platform": "Intel® Core™ i5-13600K CPU-only", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { @@ -3230,9 +3265,9 @@ 
"Precisions": [ { "int4": "", - "int8": 97.68, + "int8": 266.68, "fp16": "", - "fp32": 66.63, + "fp32": 102.24, "bf16": "" } ], @@ -3243,7 +3278,7 @@ "Precisions": [ { "int4": "", - "int8": 22.16, + "int8": 5.23, "fp16": "", "fp32": "", "bf16": "" @@ -3255,19 +3290,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1.2, + "int8": 84.23, "fp16": "", - "fp32": 0.3, + "fp32": 50.76, "bf16": "" } ], @@ -3278,7 +3313,7 @@ "Precisions": [ { "int4": "", - "int8": 1025.52, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3290,19 +3325,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "efficientdet-d0", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1969.75, + "int8": 99.37, "fp16": "", - "fp32": 815.83, + "fp32": 65.5, "bf16": "" } ], @@ -3313,7 +3348,7 @@ "Precisions": [ { "int4": "", - "int8": 1.36, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3325,19 +3360,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 390.17, + 
"int8": 1.15, "fp16": "", - "fp32": 94.82, + "fp32": 0.64, "bf16": "" } ], @@ -3348,7 +3383,7 @@ "Precisions": [ { "int4": "", - "int8": 6.23, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3360,19 +3395,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 6.38, + "int8": 1350.0, "fp16": "", - "fp32": 1.6, + "fp32": 680.17, "bf16": "" } ], @@ -3383,7 +3418,7 @@ "Precisions": [ { "int4": "", - "int8": 209.14, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3395,19 +3430,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 685.79, + "int8": 361.15, "fp16": "", - "fp32": 242.78, + "fp32": 162.3, "bf16": "" } ], @@ -3418,7 +3453,7 @@ "Precisions": [ { "int4": "", - "int8": 2.71, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3430,19 +3465,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 166.55, + "int8": 8.62, "fp16": "", - "fp32": 64.31, + "fp32": 3.78, "bf16": "" } ], @@ 
-3453,7 +3488,7 @@ "Precisions": [ { "int4": "", - "int8": 12.75, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3465,19 +3500,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "bert-base-cased", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 88.41, - "fp16": 74.04, - "fp32": "", + "int8": 653.43, + "fp16": "", + "fp32": 290.66, "bf16": "" } ], @@ -3488,7 +3523,7 @@ "Precisions": [ { "int4": "", - "int8": 12.15, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3500,19 +3535,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "efficientdet-d0", - "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "yolo11", + "featured_SKU": false, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 37.81, - "fp16": 34.74, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 106.63, "bf16": "" } ], @@ -3523,7 +3558,7 @@ "Precisions": [ { "int4": "", - "int8": 27.47, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3535,19 +3570,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "llama-2-7b-chat", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.27, - "fp16": 2.55, - "fp32": "", + "int8": 181.26, + "fp16": "", + "fp32": 101.25, "bf16": "" } ], @@ -3558,8 
+3593,8 @@ "Precisions": [ { "int4": "", - "int8": 3688.24, - "fp16": 390.94, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -3570,19 +3605,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1966.11, - "fp16": 1346.18, - "fp32": "", + "int8": 50.33, + "fp16": "", + "fp32": 18.21, "bf16": "" } ], @@ -3593,7 +3628,7 @@ "Precisions": [ { "int4": "", - "int8": 0.79, + "int8": 22.72, "fp16": "", "fp32": "", "bf16": "" @@ -3605,19 +3640,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "phi-3-mini-4k-instruct", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, NPU-only", + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 3.7, - "int8": 0.49, - "fp16": 3.91, - "fp32": "", + "int4": "", + "int8": 73.02, + "fp16": "", + "fp32": 41.12, "bf16": "" } ], @@ -3627,9 +3662,9 @@ "latency": { "Precisions": [ { - "int4": 269.82, - "int8": 2003.58, - "fp16": 255.57, + "int4": "", + "int8": 14.66, + "fp16": "", "fp32": "", "bf16": "" } @@ -3640,19 +3675,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - 
"int8": 771.23, - "fp16": 382.83, - "fp32": "", + "int8": 0.72, + "fp16": "", + "fp32": 0.19, "bf16": "" } ], @@ -3663,7 +3698,7 @@ "Precisions": [ { "int4": "", - "int8": 1.58, + "int8": 1367.22, "fp16": "", "fp32": "", "bf16": "" @@ -3675,19 +3710,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 705.76, - "fp16": 453.35, - "fp32": "", + "int8": 1279.85, + "fp16": "", + "fp32": 503.42, "bf16": "" } ], @@ -3698,7 +3733,7 @@ "Precisions": [ { "int4": "", - "int8": 1.67, + "int8": 0.96, "fp16": "", "fp32": "", "bf16": "" @@ -3710,19 +3745,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 126.18, - "fp16": 129.18, - "fp32": "", + "int8": 224.43, + "fp16": "", + "fp32": 60.83, "bf16": "" } ], @@ -3733,7 +3768,7 @@ "Precisions": [ { "int4": "", - "int8": 8.71, + "int8": 4.96, "fp16": "", "fp32": "", "bf16": "" @@ -3745,19 +3780,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "bert-base-cased", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 164.18, - 
"fp16": 107.12, - "fp32": "", + "int8": 3.93, + "fp16": "", + "fp32": 1.01, "bf16": "" } ], @@ -3768,7 +3803,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 250.5, "fp16": "", "fp32": "", "bf16": "" @@ -3780,19 +3815,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "efficientdet-d0", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 195.27, - "fp16": 164.33, - "fp32": "", + "int8": 492.36, + "fp16": "", + "fp32": 146.51, "bf16": "" } ], @@ -3803,7 +3838,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.21, "fp16": "", "fp32": "", "bf16": "" @@ -3815,19 +3850,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "gemma-2-9b", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "yolo11", + "featured_SKU": false, "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 8.94, + "int4": "", "int8": "", - "fp16": 0.94, - "fp32": "", + "fp16": "", + "fp32": 48.14, "bf16": "" } ], @@ -3837,9 +3872,9 @@ "latency": { "Precisions": [ { - "int4": 111.74, + "int4": "", "int8": "", - "fp16": 1056.4, + "fp16": "", "fp32": "", "bf16": "" } @@ -3850,19 +3885,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "glm-4-9b-chat", - "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1185G7 CPU-only", + "Model": "yolo_v8n", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - 
"int4": 10.82, - "int8": 6.3, - "fp16": 1.1, - "fp32": "", + "int4": "", + "int8": 110.17, + "fp16": "", + "fp32": 40.01, "bf16": "" } ], @@ -3872,9 +3907,9 @@ "latency": { "Precisions": [ { - "int4": 92.41, - "int8": 158.68, - "fp16": 906.89, + "int4": "", + "int8": 10.18, + "fp16": "", "fp32": "", "bf16": "" } @@ -3885,19 +3920,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "llama-2-7b-chat", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 14.62, - "int8": 8.53, + "int4": "", + "int8": 51.12, "fp16": "", - "fp32": "", + "fp32": 26.48, "bf16": "" } ], @@ -3907,8 +3942,8 @@ "latency": { "Precisions": [ { - "int4": 68.39, - "int8": 117.1, + "int4": "", + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -3920,19 +3955,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "llama-3-8b", - "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "efficientdet-d0", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 14.82, - "int8": 7.84, - "fp16": 4.04, - "fp32": "", + "int4": "", + "int8": 58.22, + "fp16": "", + "fp32": 28.25, "bf16": "" } ], @@ -3942,9 +3977,9 @@ "latency": { "Precisions": [ { - "int4": 67.44, - "int8": 127.51, - "fp16": 247.29, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -3955,19 +3990,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "llama-3.2-3b-instruct", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", - 
"Parameters": { + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", + "Parameters": { "throughput": { "Precisions": [ { - "int4": 26.17, - "int8": 20.38, - "fp16": 10.76, - "fp32": "", + "int4": "", + "int8": 0.72, + "fp16": "", + "fp32": 0.32, "bf16": "" } ], @@ -3977,9 +4012,9 @@ "latency": { "Precisions": [ { - "int4": 38.21, - "int8": 49.06, - "fp16": 92.92, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -3990,19 +4025,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.35, - "fp16": 1.58, - "fp32": "", + "int8": 977.87, + "fp16": "", + "fp32": 347.74, "bf16": "" } ], @@ -4013,7 +4048,7 @@ "Precisions": [ { "int4": "", - "int8": 421.72, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4025,19 +4060,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "mistral-7b-v0.1", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 15.03, - "int8": 8.94, + "int4": "", + "int8": 234.93, "fp16": "", - "fp32": "", + "fp32": 87.7, "bf16": "" } ], @@ -4047,8 +4082,8 @@ "latency": { "Precisions": [ { - "int4": 66.52, - "int8": 111.8, + "int4": "", + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4060,19 +4095,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 
processor 155H iGPU-only", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1293.98, - "fp16": 1371.59, - "fp32": "", + "int8": 4.44, + "fp16": "", + "fp32": 1.77, "bf16": "" } ], @@ -4095,19 +4130,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "phi-3-mini-4k-instruct", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 23.61, - "int8": 18.01, - "fp16": 9.36, - "fp32": "", + "int4": "", + "int8": 457.58, + "fp16": "", + "fp32": 163.76, "bf16": "" } ], @@ -4117,9 +4152,9 @@ "latency": { "Precisions": [ { - "int4": 42.34, - "int8": 55.51, - "fp16": 106.82, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -4130,19 +4165,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "qwen2-7b", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "yolo11", + "featured_SKU": false, "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 16.68, - "int8": 9.5, + "int4": "", + "int8": "", "fp16": "", - "fp32": "", + "fp32": 55.7, "bf16": "" } ], @@ -4152,8 +4187,8 @@ "latency": { "Precisions": [ { - "int4": 59.95, - "int8": 105.26, + "int4": "", + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4165,19 +4200,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 
155H iGPU-only", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 563.96, - "fp16": 416.13, - "fp32": "", + "int8": 103.01, + "fp16": "", + "fp32": 53.55, "bf16": "" } ], @@ -4200,19 +4235,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 21.26, - "fp16": 12.84, - "fp32": "", + "int8": 38.29, + "fp16": "", + "fp32": 13.8, "bf16": "" } ], @@ -4223,7 +4258,7 @@ "Precisions": [ { "int4": "", - "int8": 47.61, + "int8": 28.27, "fp16": "", "fp32": "", "bf16": "" @@ -4235,19 +4270,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1030.66, - "fp16": 811.13, - "fp32": "", + "int8": 54.05, + "fp16": "", + "fp32": 22.12, "bf16": "" } ], @@ -4258,7 +4293,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 20.1, "fp16": "", "fp32": "", "bf16": "" @@ -4270,19 +4305,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": 
"mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 403.44, - "fp16": 306.22, - "fp32": "", + "int8": 0.52, + "fp16": "", + "fp32": 0.13, "bf16": "" } ], @@ -4293,7 +4328,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1784.45, "fp16": "", "fp32": "", "bf16": "" @@ -4305,19 +4340,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "bert-base-cased", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 223.99, + "int8": 969.46, "fp16": "", - "fp32": 189.97, + "fp32": 314.77, "bf16": "" } ], @@ -4328,7 +4363,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.2, "fp16": "", "fp32": "", "bf16": "" @@ -4340,11 +4375,11 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "efficientdet-d0", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4352,7 +4387,7 @@ "int4": "", "int8": 174.87, "fp16": "", - "fp32": 149.3, + "fp32": 45.34, "bf16": "" } ], @@ -4363,7 +4398,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 6.4, "fp16": "", "fp32": "", "bf16": "" @@ -4375,19 +4410,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "ssd-resnet34-1200", + 
"featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 7.24, + "int8": 2.63, "fp16": "", - "fp32": 3.52, + "fp32": "", "bf16": "" } ], @@ -4398,7 +4433,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 320.71, "fp16": "", "fp32": "", "bf16": "" @@ -4410,19 +4445,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 4846.91, + "int8": 385.99, "fp16": "", - "fp32": 2888.98, + "fp32": 100.06, "bf16": "" } ], @@ -4433,7 +4468,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.81, "fp16": "", "fp32": "", "bf16": "" @@ -4445,19 +4480,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "resnet-50", - "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "yolo11", + "featured_SKU": false, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1975.45, + "int8": "", "fp16": "", - "fp32": 922.35, + "fp32": 31.93, "bf16": "" } ], @@ -4480,19 +4515,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1185GRE CPU-only", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { 
"Precisions": [ { "int4": "", - "int8": "", + "int8": 77.3, "fp16": "", - "fp32": 20.97, + "fp32": 27.78, "bf16": "" } ], @@ -4503,7 +4538,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 13.2, "fp16": "", "fp32": "", "bf16": "" @@ -4515,9 +4550,9 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { @@ -4525,9 +4560,9 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 113.15, "fp16": "", - "fp32": 585.46, + "fp32": 57.03, "bf16": "" } ], @@ -4550,9 +4585,9 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "efficientdet-d0", + "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { @@ -4560,9 +4595,9 @@ "Precisions": [ { "int4": "", - "int8": 343.07, + "int8": 141.82, "fp16": "", - "fp32": 274.85, + "fp32": 75.09, "bf16": "" } ], @@ -4585,19 +4620,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "bert-base-cased", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 44.06, + "int8": 1.62, "fp16": "", - "fp32": 16.03, + "fp32": 0.71, "bf16": "" } ], @@ -4608,7 +4643,7 @@ "Precisions": [ { "int4": "", - "int8": 41.27, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4620,19 +4655,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": 
true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 53.32, + "int8": 2306.82, "fp16": "", - "fp32": 38.06, + "fp32": 1147.83, "bf16": "" } ], @@ -4643,7 +4678,7 @@ "Precisions": [ { "int4": "", - "int8": 28.44, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4655,19 +4690,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.65, + "int8": 528.4, "fp16": "", - "fp32": 0.16, + "fp32": 183.43, "bf16": "" } ], @@ -4678,7 +4713,7 @@ "Precisions": [ { "int4": "", - "int8": 2598.78, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4690,19 +4725,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 917.84, + "int8": 10.4, "fp16": "", - "fp32": 490.87, + "fp32": 3.75, "bf16": "" } ], @@ -4713,7 +4748,7 @@ "Precisions": [ { "int4": "", - "int8": 2.07, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4725,19 +4760,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": 
"ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 194.09, + "int8": 1014.24, "fp16": "", - "fp32": 52.09, + "fp32": 400.36, "bf16": "" } ], @@ -4748,7 +4783,7 @@ "Precisions": [ { "int4": "", - "int8": 9.58, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4760,19 +4795,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "yolo11", + "featured_SKU": false, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3.52, + "int8": "", "fp16": "", - "fp32": 0.87, + "fp32": 131.95, "bf16": "" } ], @@ -4783,7 +4818,7 @@ "Precisions": [ { "int4": "", - "int8": 493.86, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4795,19 +4830,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU+iGPU", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 380.37, + "int8": 266.22, "fp16": "", - "fp32": 135.96, + "fp32": 120.83, "bf16": "" } ], @@ -4818,7 +4853,7 @@ "Precisions": [ { "int4": "", - "int8": 4.64, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -4830,9 +4865,9 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "bert-base-cased", + "featured_SKU": false, 
"whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { @@ -4840,9 +4875,9 @@ "Precisions": [ { "int4": "", - "int8": 80.52, + "int8": 87.91, "fp16": "", - "fp32": 34.88, + "fp32": 34.69, "bf16": "" } ], @@ -4853,7 +4888,7 @@ "Precisions": [ { "int4": "", - "int8": 20.34, + "int8": 16.29, "fp16": "", "fp32": "", "bf16": "" @@ -4865,19 +4900,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "bert-base-cased", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 265.97, - "fp16": 198.16, - "fp32": "", + "int8": 115.7, + "fp16": "", + "fp32": 61.66, "bf16": "" } ], @@ -4888,7 +4923,7 @@ "Precisions": [ { "int4": "", - "int8": 5.25, + "int8": 11.66, "fp16": "", "fp32": "", "bf16": "" @@ -4900,19 +4935,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "efficientdet-d0", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 13.69, - "fp16": 13.65, - "fp32": "", + "int8": 1.27, + "fp16": "", + "fp32": 0.36, "bf16": "" } ], @@ -4923,7 +4958,7 @@ "Precisions": [ { "int4": "", - "int8": 119.56, + "int8": 890.38, "fp16": "", "fp32": "", "bf16": "" @@ -4935,19 +4970,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "llama-2-7b-chat", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, 
NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.24, - "fp16": 4.4, - "fp32": "", + "int8": 1984.09, + "fp16": "", + "fp32": 970.61, "bf16": "" } ], @@ -4958,8 +4993,8 @@ "Precisions": [ { "int4": "", - "int8": 4094.9, - "fp16": 226.87, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -4970,19 +5005,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3799.36, - "fp16": 3178.95, - "fp32": "", + "int8": 429.37, + "fp16": "", + "fp32": 109.95, "bf16": "" } ], @@ -4993,7 +5028,7 @@ "Precisions": [ { "int4": "", - "int8": 0.46, + "int8": 3.48, "fp16": "", "fp32": "", "bf16": "" @@ -5005,19 +5040,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "phi-3-mini-4k-instruct", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, NPU-only", + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 3.31, - "int8": 0.72, - "fp16": 6.86, - "fp32": "", + "int4": "", + "int8": 7.12, + "fp16": "", + "fp32": 1.96, "bf16": "" } ], @@ -5027,9 +5062,9 @@ "latency": { "Precisions": [ { - "int4": 301.49, - "int8": 1378.29, - "fp16": 145.76, + "int4": "", + "int8": 158.86, + "fp16": "", "fp32": "", "bf16": "" } @@ -5040,19 +5075,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": 
"ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2161.26, - "fp16": 948.32, - "fp32": "", + "int8": 850.51, + "fp16": "", + "fp32": 290.18, "bf16": "" } ], @@ -5063,7 +5098,7 @@ "Precisions": [ { "int4": "", - "int8": 0.79, + "int8": 1.71, "fp16": "", "fp32": "", "bf16": "" @@ -5075,19 +5110,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "Platform": "Intel® Core™ i7-12700H CPU-only", + "Model": "yolo11", + "featured_SKU": false, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 230.18, - "fp16": 192.78, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 90.98, "bf16": "" } ], @@ -5098,7 +5133,7 @@ "Precisions": [ { "int4": "", - "int8": 8.29, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -5110,19 +5145,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V NPU-only", + "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "yolo_v8n", - "featured_SKU": true, + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, NPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 401.12, - "fp16": 497.56, - "fp32": "", + "int8": 206.01, + "fp16": "", + "fp32": 77.63, "bf16": "" } ], @@ -5133,7 +5168,7 @@ "Precisions": [ { "int4": "", - "int8": 3.97, + "int8": 6.48, "fp16": "", "fp32": "", "bf16": "" @@ -5145,19 +5180,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "bert-base-cased", - "featured_SKU": true, + "featured_SKU": false, 
"whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 225.83, - "fp16": 298.39, - "fp32": "", + "int8": 60.89, + "fp16": "", + "fp32": 32.08, "bf16": "" } ], @@ -5168,7 +5203,7 @@ "Precisions": [ { "int4": "", - "int8": 3.93, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -5180,19 +5215,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "efficientdet-d0", - "featured_SKU": true, + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 114.57, - "fp16": 121.87, - "fp32": "", + "int8": 87.91, + "fp16": "", + "fp32": 58.38, "bf16": "" } ], @@ -5203,7 +5238,7 @@ "Precisions": [ { "int4": "", - "int8": 10.22, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -5215,18 +5250,18 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "gemma-2-9b", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 14.49, - "int8": 8.34, - "fp16": 0.59, + "int4": "", + "int8": 0.81, + "fp16": "", "fp32": "", "bf16": "" } @@ -5237,9 +5272,9 @@ "latency": { "Precisions": [ { - "int4": 68.99, - "int8": 119.77, - "fp16": 1691.52, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -5250,19 +5285,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "glm-4-9b-chat", - "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": 
"Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "mobilenet-v2", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 17.63, - "int8": 9.8, - "fp16": 0.71, - "fp32": "", + "int4": "", + "int8": 1202.56, + "fp16": "", + "fp32": 636.0, "bf16": "" } ], @@ -5272,9 +5307,9 @@ "latency": { "Precisions": [ { - "int4": 56.72, - "int8": 102.04, - "fp16": 1402.74, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -5285,19 +5320,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "llama-2-7b-chat", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 18.48, - "int8": 11.87, - "fp16": 6.44, - "fp32": "", + "int4": "", + "int8": 282.14, + "fp16": "", + "fp32": 108.19, "bf16": "" } ], @@ -5307,9 +5342,9 @@ "latency": { "Precisions": [ { - "int4": 54.09, - "int8": 84.18, - "fp16": 155.17, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -5320,19 +5355,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "llama-3-8b", - "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 20.41, - "int8": 11.07, - "fp16": 5.81, - "fp32": "", + "int4": "", + "int8": 5.67, + "fp16": "", + "fp32": 2.14, "bf16": "" } ], @@ -5342,9 +5377,9 @@ "latency": { "Precisions": [ { - "int4": 48.98, - "int8": 90.29, - 
"fp16": 171.98, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -5355,19 +5390,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "llama-3.2-3b-instruct", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 36.58, - "int8": 23.94, - "fp16": 12.86, - "fp32": "", + "int4": "", + "int8": 549.37, + "fp16": "", + "fp32": 228.26, "bf16": "" } ], @@ -5377,9 +5412,9 @@ "latency": { "Precisions": [ { - "int4": 27.33, - "int8": 41.77, - "fp16": 77.71, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -5390,19 +5425,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "yolo11", + "featured_SKU": false, + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 10.4, - "fp16": 5.7, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 79.72, "bf16": "" } ], @@ -5413,7 +5448,7 @@ "Precisions": [ { "int4": "", - "int8": 109.21, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -5425,19 +5460,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "mistral-7b-v0.1", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", + "Model": "yolo_v8n", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { - "int4": 20.06, 
- "int8": 11.6, - "fp16": 6.05, - "fp32": "", + "int4": "", + "int8": 152.16, + "fp16": "", + "fp32": 71.37, "bf16": "" } ], @@ -5447,9 +5482,9 @@ "latency": { "Precisions": [ { - "int4": 49.85, - "int8": 86.18, - "fp16": 165.15, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -5460,19 +5495,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "mobilenet-v2", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "bert-base-cased", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1007.75, - "fp16": 862.8, - "fp32": "", + "int8": 44.38, + "fp16": "", + "fp32": 17.86, "bf16": "" } ], @@ -5483,7 +5518,7 @@ "Precisions": [ { "int4": "", - "int8": 1.2, + "int8": 37.68, "fp16": "", "fp32": "", "bf16": "" @@ -5495,19 +5530,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "phi-3-mini-4k-instruct", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 31.27, - "int8": 20.55, - "fp16": 11.04, - "fp32": "", + "int4": "", + "int8": 61.43, + "fp16": "", + "fp32": 39.3, "bf16": "" } ], @@ -5517,9 +5552,9 @@ "latency": { "Precisions": [ { - "int4": 31.97, - "int8": 48.66, - "fp16": 90.57, + "int4": "", + "int8": 27.06, + "fp16": "", "fp32": "", "bf16": "" } @@ -5530,19 +5565,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "qwen2-7b", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Core™ i7-1355U 
Processor CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 20.99, - "int8": 12.69, - "fp16": 6.07, - "fp32": "", + "int4": "", + "int8": 0.63, + "fp16": "", + "fp32": 0.17, "bf16": "" } ], @@ -5552,9 +5587,9 @@ "latency": { "Precisions": [ { - "int4": 47.64, - "int8": 78.78, - "fp16": 164.54, + "int4": "", + "int8": 1970.66, + "fp16": "", "fp32": "", "bf16": "" } @@ -5565,19 +5600,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "resnet-50", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 830.46, - "fp16": 585.38, - "fp32": "", + "int8": 1035.64, + "fp16": "", + "fp32": 515.95, "bf16": "" } ], @@ -5588,7 +5623,7 @@ "Precisions": [ { "int4": "", - "int8": 1.23, + "int8": 1.62, "fp16": "", "fp32": "", "bf16": "" @@ -5600,19 +5635,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "resnet-50", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 57.99, - "fp16": 32.18, - "fp32": "", + "int8": 200.99, + "fp16": "", + "fp32": 58.72, "bf16": "" } ], @@ -5623,7 +5658,7 @@ "Precisions": [ { "int4": "", - "int8": 26.21, + "int8": 9.03, "fp16": "", "fp32": "", "bf16": "" @@ -5635,19 +5670,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "ssd_mobilenet_v1_coco", - 
"featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "ssd-resnet34-1200", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 485.85, - "fp16": 555.71, - "fp32": "", + "int8": 3.43, + "fp16": "", + "fp32": 1.02, "bf16": "" } ], @@ -5658,7 +5693,7 @@ "Precisions": [ { "int4": "", - "int8": 1.75, + "int8": 445.79, "fp16": "", "fp32": "", "bf16": "" @@ -5670,19 +5705,19 @@ } }, { - "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU-only", - "Model": "yolo_v8n", - "featured_SKU": true, + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 362.75, - "fp16": 375.06, - "fp32": "", + "int8": 418.13, + "fp16": "", + "fp32": 150.89, "bf16": "" } ], @@ -5693,7 +5728,7 @@ "Precisions": [ { "int4": "", - "int8": 3.3, + "int8": 3.89, "fp16": "", "fp32": "", "bf16": "" @@ -5705,19 +5740,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "bert-base-cased", + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 34.21, + "int8": "", "fp16": "", - "fp32": 15.71, + "fp32": 48.48, "bf16": "" } ], @@ -5740,19 +5775,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - 
"PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 47.95, + "int8": 100.45, "fp16": "", - "fp32": 29.38, + "fp32": 40.52, "bf16": "" } ], @@ -5763,7 +5798,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 17.12, "fp16": "", "fp32": "", "bf16": "" @@ -5775,19 +5810,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.5, + "int8": 169.91, "fp16": "", - "fp32": 0.18, + "fp32": 67.83, "bf16": "" } ], @@ -5798,7 +5833,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 10.74, "fp16": "", "fp32": "", "bf16": "" @@ -5810,19 +5845,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 742.67, + "int8": 222.19, "fp16": "", - "fp32": 331.98, + "fp32": 126.81, "bf16": "" } ], @@ -5833,7 +5868,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 7.32, "fp16": "", "fp32": "", "bf16": "" @@ -5845,31 +5880,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "resnet-50", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "gemma-2-9b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ 
{ - "int4": "", - "int8": 162.84, - "fp16": "", - "fp32": 51.66, + "int4": 9.57, + "int8": 6.99, + "fp16": 3.59, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 104.39, + "int8": 143.05, + "fp16": 277.85, "fp32": "", "bf16": "" } @@ -5880,31 +5915,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "glm-4-9b-chat", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": "false", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 1.03, + "int4": 10.66, + "int8": 7.49, + "fp16": 3.82, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 93.74, + "int8": 133.46, + "fp16": 261.55, "fp32": "", "bf16": "" } @@ -5915,31 +5950,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "llama-2-7b-chat", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 328.29, - "fp16": "", - "fp32": 115.41, + "int4": 14.22, + "int8": 9.66, + "fp16": 4.96, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 70.32, + "int8": 103.45, + "fp16": 201.53, "fp32": "", "bf16": "" } @@ -5950,31 +5985,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor 
CPU+iGPU", - "Model": "yolo11", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "llama-3-8b", "featured_SKU": false, "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 41.68, + "int4": 12.04, + "int8": 8.7, + "fp16": 4.48, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 83.05, + "int8": 114.86, + "fp16": 223.06, "fp32": "", "bf16": "" } @@ -5985,31 +6020,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "llama-3.2-3b-instruct", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": true, + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 79.4, - "fp16": "", - "fp32": 35.44, + "int4": 27.06, + "int8": 19.19, + "fp16": 10.21, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 36.95, + "int8": 52.11, + "fp16": 97.91, "fp32": "", "bf16": "" } @@ -6020,8 +6055,8 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", @@ -6030,9 +6065,9 @@ "Precisions": [ { "int4": "", - "int8": 31.55, + "int8": 2.49, "fp16": "", - "fp32": 12.38, + "fp32": 0.71, "bf16": "" } ], @@ -6043,7 +6078,7 @@ "Precisions": [ { "int4": "", - "int8": 46.55, + "int8": 563.36, "fp16": "", "fp32": 
"", "bf16": "" @@ -6055,8 +6090,8 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "mistral-7b-v0.1", "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", @@ -6064,22 +6099,22 @@ "throughput": { "Precisions": [ { - "int4": "", - "int8": 43.39, - "fp16": "", - "fp32": 23.14, + "int4": 14.65, + "int8": 9.16, + "fp16": 4.72, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 33.09, - "fp16": "", + "int4": 68.23, + "int8": 109.06, + "fp16": 211.82, "fp32": "", "bf16": "" } @@ -6090,8 +6125,8 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", @@ -6100,9 +6135,9 @@ "Precisions": [ { "int4": "", - "int8": 0.45, + "int8": 4254.4, "fp16": "", - "fp32": 0.12, + "fp32": 2049.21, "bf16": "" } ], @@ -6113,7 +6148,7 @@ "Precisions": [ { "int4": "", - "int8": 2440.72, + "int8": 0.6, "fp16": "", "fp32": "", "bf16": "" @@ -6125,31 +6160,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "phi-3-mini-4k-instruct", "featured_SKU": false, - "whats_new_model": false, + "whats_new_model": true, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 789.02, - "fp16": "", + "int4": 19.2, + "int8": 16.44, + "fp16": 8.59, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1.9, - "fp16": "", + "int4": 52.06, + "int8": 60.81, + "fp16": 
116.35, "fp32": "", "bf16": "" } @@ -6160,31 +6195,31 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "qwen2-7b", "featured_SKU": false, - "whats_new_model": false, + "whats_new_model": true, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 147.74, - "fp16": "", - "fp32": 38.84, + "int4": 13.12, + "int8": 9.29, + "fp16": 4.76, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 11.4, - "fp16": "", + "int4": 76.19, + "int8": 107.6, + "fp16": 209.76, "fp32": "", "bf16": "" } @@ -6195,8 +6230,8 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", @@ -6205,9 +6240,9 @@ "Precisions": [ { "int4": "", - "int8": 2.66, + "int8": 762.92, "fp16": "", - "fp32": 0.77, + "fp32": 233.76, "bf16": "" } ], @@ -6218,7 +6253,7 @@ "Precisions": [ { "int4": "", - "int8": 511.09, + "int8": 2.18, "fp16": "", "fp32": "", "bf16": "" @@ -6230,8 +6265,8 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", @@ -6240,9 +6275,9 @@ "Precisions": [ { "int4": "", - "int8": 313.17, + "int8": 13.03, "fp16": "", - "fp32": 95.81, + "fp32": 3.84, "bf16": "" } ], @@ -6253,7 +6288,7 @@ "Precisions": [ { "int4": "", - "int8": 4.81, + "int8": 102.76, "fp16": "", "fp32": "", "bf16": "" @@ -6265,19 +6300,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "yolo11", + 
"Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, - "whats_new_model": "false", + "whats_new_model": false, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 1607.47, "fp16": "", - "fp32": 31.84, + "fp32": 590.49, "bf16": "" } ], @@ -6288,7 +6323,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.1, "fp16": "", "fp32": "", "bf16": "" @@ -6300,19 +6335,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, + "whats_new_model": true, "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 67.43, + "int8": "", "fp16": "", - "fp32": 26.68, + "fp32": 185.48, "bf16": "" } ], @@ -6323,7 +6358,7 @@ "Precisions": [ { "int4": "", - "int8": 20.62, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6335,19 +6370,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Core™ i9-13900K CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 46.15, - "fp16": 38.3, - "fp32": "", + "int8": 389.46, + "fp16": "", + "fp32": 155.36, "bf16": "" } ], @@ -6358,7 +6393,7 @@ "Precisions": [ { "int4": "", - "int8": 19.82, + "int8": 4.07, "fp16": "", "fp32": "", "bf16": "" @@ -6370,19 +6405,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", + "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": 
{ "throughput": { "Precisions": [ { "int4": "", - "int8": 64.24, - "fp16": 50.43, - "fp32": "", + "int8": 36.73, + "fp16": "", + "fp32": 27.62, "bf16": "" } ], @@ -6393,7 +6428,7 @@ "Precisions": [ { "int4": "", - "int8": 20.17, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6405,19 +6440,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Processor N100 CPU+iGPU", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.5, - "fp16": 0.51, - "fp32": "", + "int8": 486.76, + "fp16": "", + "fp32": 276.51, "bf16": "" } ], @@ -6428,7 +6463,7 @@ "Precisions": [ { "int4": "", - "int8": 1499.27, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6440,19 +6475,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Processor N100 CPU+iGPU", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 768.31, - "fp16": 485.7, - "fp32": "", + "int8": 111.75, + "fp16": "", + "fp32": 42.11, "bf16": "" } ], @@ -6463,7 +6498,7 @@ "Precisions": [ { "int4": "", - "int8": 1.7, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6475,19 +6510,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", - "Model": "resnet-50", + "Platform": "Intel® Processor N100 CPU+iGPU", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 208.55, - "fp16": 117.84, - "fp32": "", + "int8": 2.03, + 
"fp16": "", + "fp32": 0.59, "bf16": "" } ], @@ -6498,7 +6533,7 @@ "Precisions": [ { "int4": "", - "int8": 5.0, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6510,19 +6545,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Processor N100 CPU+iGPU", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 5.64, - "fp16": 2.72, - "fp32": "", + "int8": 216.17, + "fp16": "", + "fp32": 94.47, "bf16": "" } ], @@ -6533,7 +6568,7 @@ "Precisions": [ { "int4": "", - "int8": 172.69, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6545,19 +6580,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Processor N100 CPU+iGPU", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 382.92, - "fp16": 223.39, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 34.22, "bf16": "" } ], @@ -6568,7 +6603,7 @@ "Precisions": [ { "int4": "", - "int8": 3.11, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -6580,19 +6615,19 @@ } }, { - "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", + "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 126.83, - "fp16": 77.91, - "fp32": "", + "int8": 60.9, + "fp16": "", + "fp32": 28.4, "bf16": "" } ], @@ -6603,7 +6638,7 @@ "Precisions": [ { "int4": "", - "int8": 8.1, + "int8": "", 
"fp16": "", "fp32": "", "bf16": "" @@ -6614,20 +6649,20 @@ } } }, - { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "bert-base-cased", + { + "Platform": "Intel® Processor N100 iGPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 49.68, - "fp16": "", - "fp32": 26.85, + "int8": 33.68, + "fp16": 30.76, + "fp32": "", "bf16": "" } ], @@ -6638,7 +6673,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 38.43, "fp16": "", "fp32": "", "bf16": "" @@ -6650,19 +6685,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "efficientdet-d0", + "Platform": "Intel® Processor N100 iGPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 73.94, - "fp16": "", - "fp32": 48.63, + "int8": 338.82, + "fp16": 267.31, + "fp32": "", "bf16": "" } ], @@ -6673,7 +6708,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.87, "fp16": "", "fp32": "", "bf16": "" @@ -6685,19 +6720,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Processor N100 iGPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.69, - "fp16": "", - "fp32": 0.3, + "int8": 81.79, + "fp16": 49.77, + "fp32": "", "bf16": "" } ], @@ -6708,7 +6743,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 13.19, "fp16": "", "fp32": "", "bf16": "" @@ -6720,19 +6755,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U 
Processor CPU+iGPU", - "Model": "mobilenet-v2", + "Platform": "Intel® Processor N100 iGPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1050.26, - "fp16": "", - "fp32": 535.0, + "int8": 1.62, + "fp16": 1.01, + "fp32": "", "bf16": "" } ], @@ -6743,7 +6778,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 623.09, "fp16": "", "fp32": "", "bf16": "" @@ -6755,19 +6790,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "resnet-50", + "Platform": "Intel® Processor N100 iGPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 234.19, - "fp16": "", - "fp32": 87.89, + "int8": 164.45, + "fp16": 106.87, + "fp32": "", "bf16": "" } ], @@ -6778,7 +6813,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 7.39, "fp16": "", "fp32": "", "bf16": "" @@ -6790,19 +6825,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Processor N100 iGPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 4.74, - "fp16": "", - "fp32": 1.74, + "int8": 47.02, + "fp16": 34.98, + "fp32": "", "bf16": "" } ], @@ -6813,7 +6848,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 23.09, "fp16": "", "fp32": "", "bf16": "" @@ -6825,19 +6860,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Processor N100 CPU-only", + "Model": 
"efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 466.65, + "int8": 15.43, "fp16": "", - "fp32": 188.83, + "fp32": 12.72, "bf16": "" } ], @@ -6848,7 +6883,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 66.37, "fp16": "", "fp32": "", "bf16": "" @@ -6860,19 +6895,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "yolo11", + "Platform": "Intel® Processor N100 CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": false, + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 296.05, "fp16": "", - "fp32": 65.34, + "fp32": 182.7, "bf16": "" } ], @@ -6883,7 +6918,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.82, "fp16": "", "fp32": "", "bf16": "" @@ -6895,19 +6930,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", - "Model": "yolo_v8n", + "Platform": "Intel® Processor N100 CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 125.18, + "int8": 48.69, "fp16": "", - "fp32": 58.13, + "fp32": 20.16, "bf16": "" } ], @@ -6918,7 +6953,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 21.9, "fp16": "", "fp32": "", "bf16": "" @@ -6930,19 +6965,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Processor N100 CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Atom™, CPU-only", 
"Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 39.97, + "int8": 0.81, "fp16": "", - "fp32": 15.97, + "fp32": 0.31, "bf16": "" } ], @@ -6953,7 +6988,7 @@ "Precisions": [ { "int4": "", - "int8": 40.14, + "int8": 1223.75, "fp16": "", "fp32": "", "bf16": "" @@ -6965,19 +7000,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Processor N100 CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 56.15, + "int8": 105.91, "fp16": "", - "fp32": 35.76, + "fp32": 49.22, "bf16": "" } ], @@ -6988,7 +7023,7 @@ "Precisions": [ { "int4": "", - "int8": 28.73, + "int8": 9.72, "fp16": "", "fp32": "", "bf16": "" @@ -7000,19 +7035,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Processor N100 CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.57, + "int8": "", "fp16": "", - "fp32": 0.16, + "fp32": 15.38, "bf16": "" } ], @@ -7023,7 +7058,7 @@ "Precisions": [ { "int4": "", - "int8": 2069.28, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -7035,19 +7070,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Processor N100 CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 951.93, + "int8": 23.61, "fp16": "", - "fp32": 463.06, + 
"fp32": 12.8, "bf16": "" } ], @@ -7058,7 +7093,7 @@ "Precisions": [ { "int4": "", - "int8": 1.74, + "int8": 43.43, "fp16": "", "fp32": "", "bf16": "" @@ -7070,19 +7105,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 184.54, + "int8": 220.43, "fp16": "", - "fp32": 52.88, + "fp32": 80.48, "bf16": "" } ], @@ -7093,7 +7128,7 @@ "Precisions": [ { "int4": "", - "int8": 9.61, + "int8": 14.26, "fp16": "", "fp32": "", "bf16": "" @@ -7105,19 +7140,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3.16, + "int8": 271.82, "fp16": "", - "fp32": 0.92, + "fp32": 167.4, "bf16": "" } ], @@ -7128,7 +7163,7 @@ "Precisions": [ { "int4": "", - "int8": 466.34, + "int8": 11.27, "fp16": "", "fp32": "", "bf16": "" @@ -7140,19 +7175,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 383.62, + "int8": 3.27, "fp16": "", - "fp32": 134.93, + "fp32": 0.91, "bf16": "" } ], @@ -7163,7 +7198,7 @@ "Precisions": [ { "int4": "", - "int8": 4.16, + "int8": 636.88, "fp16": "", "fp32": 
"", "bf16": "" @@ -7175,19 +7210,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "yolo11", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": false, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 5423.43, "fp16": "", - "fp32": 43.64, + "fp32": 1935.82, "bf16": "" } ], @@ -7198,7 +7233,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.5, "fp16": "", "fp32": "", "bf16": "" @@ -7210,19 +7245,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 91.3, + "int8": 974.84, "fp16": "", - "fp32": 36.39, + "fp32": 269.18, "bf16": "" } ], @@ -7233,7 +7268,7 @@ "Precisions": [ { "int4": "", - "int8": 18.15, + "int8": 3.11, "fp16": "", "fp32": "", "bf16": "" @@ -7245,19 +7280,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 47.17, - "fp16": 39.79, - "fp32": "", + "int8": 17.65, + "fp16": "", + "fp32": 4.59, "bf16": "" } ], @@ -7268,7 +7303,7 @@ "Precisions": [ { "int4": "", - "int8": 18.45, + "int8": 116.16, "fp16": "", "fp32": "", "bf16": "" @@ -7280,19 +7315,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": 
"efficientdet-d0", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 80.6, - "fp16": 59.92, - "fp32": "", + "int8": 2101.22, + "fp16": "", + "fp32": 639.42, "bf16": "" } ], @@ -7303,7 +7338,7 @@ "Precisions": [ { "int4": "", - "int8": 14.61, + "int8": 1.6, "fp16": "", "fp32": "", "bf16": "" @@ -7315,19 +7350,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.52, - "fp16": 0.58, - "fp32": "", + "int8": "", + "fp16": "", + "fp32": 206.74, "bf16": "" } ], @@ -7338,7 +7373,7 @@ "Precisions": [ { "int4": "", - "int8": 1506.76, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -7350,19 +7385,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Gold 5218T CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 778.4, - "fp16": 509.56, - "fp32": "", + "int8": 440.68, + "fp16": "", + "fp32": 173.86, "bf16": "" } ], @@ -7373,7 +7408,7 @@ "Precisions": [ { "int4": "", - "int8": 1.48, + "int8": 6.0, "fp16": "", "fp32": "", "bf16": "" @@ -7385,19 +7420,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": 
"bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 225.12, - "fp16": 127.27, - "fp32": "", + "int8": 425.03, + "fp16": "", + "fp32": 163.81, "bf16": "" } ], @@ -7408,7 +7443,7 @@ "Precisions": [ { "int4": "", - "int8": 4.31, + "int8": 11.11, "fp16": "", "fp32": "", "bf16": "" @@ -7420,19 +7455,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 5.79, - "fp16": 2.86, - "fp32": "", + "int8": 411.76, + "fp16": "", + "fp32": 254.31, "bf16": "" } ], @@ -7443,7 +7478,7 @@ "Precisions": [ { "int4": "", - "int8": 144.71, + "int8": 8.6, "fp16": "", "fp32": "", "bf16": "" @@ -7455,19 +7490,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 404.76, - "fp16": 237.61, - "fp32": "", + "int8": 6.56, + "fp16": "", + "fp32": 1.64, "bf16": "" } ], @@ -7478,7 +7513,7 @@ "Precisions": [ { "int4": "", - "int8": 2.75, + "int8": 324.41, "fp16": "", "fp32": "", "bf16": "" @@ -7490,19 +7525,19 @@ } }, { - "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": 
"Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 131.89, - "fp16": 83.17, - "fp32": "", + "int8": 10309.85, + "fp16": "", + "fp32": 3324.77, "bf16": "" } ], @@ -7513,7 +7548,7 @@ "Precisions": [ { "int4": "", - "int8": 7.11, + "int8": 1.18, "fp16": "", "fp32": "", "bf16": "" @@ -7525,19 +7560,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 120.44, + "int8": 2132.35, "fp16": "", - "fp32": 47.21, + "fp32": 569.56, "bf16": "" } ], @@ -7548,7 +7583,7 @@ "Precisions": [ { "int4": "", - "int8": 13.32, + "int8": 1.85, "fp16": "", "fp32": "", "bf16": "" @@ -7560,19 +7595,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 148.91, + "int8": 41.73, "fp16": "", - "fp32": 93.08, + "fp32": 10.88, "bf16": "" } ], @@ -7583,7 +7618,7 @@ "Precisions": [ { "int4": "", - "int8": 9.22, + "int8": 49.6, "fp16": "", "fp32": "", "bf16": "" @@ -7595,19 +7630,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 
"", + "int8": 4373.09, "fp16": "", - "fp32": 0.49, + "fp32": 1243.88, "bf16": "" } ], @@ -7618,7 +7653,7 @@ "Precisions": [ { "int4": "", - "int8": 733.91, + "int8": 1.28, "fp16": "", "fp32": "", "bf16": "" @@ -7630,19 +7665,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2974.41, + "int8": "", "fp16": "", - "fp32": 1317.04, + "fp32": 383.5, "bf16": "" } ], @@ -7653,7 +7688,7 @@ "Precisions": [ { "int4": "", - "int8": 0.69, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -7665,19 +7700,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 537.98, + "int8": 736.96, "fp16": "", - "fp32": 148.85, + "fp32": 338.37, "bf16": "" } ], @@ -7688,7 +7723,7 @@ "Precisions": [ { "int4": "", - "int8": 2.82, + "int8": 4.23, "fp16": "", "fp32": "", "bf16": "" @@ -7700,19 +7735,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 8.8, + "int8": 622.89, "fp16": "", - "fp32": 2.47, + "fp32": 241.06, "bf16": "" } ], @@ -7723,7 +7758,7 @@ "Precisions": [ { "int4": "", - "int8": 133.73, + "int8": 6.67, 
"fp16": "", "fp32": "", "bf16": "" @@ -7735,19 +7770,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1068.19, + "int8": 722.69, "fp16": "", - "fp32": 379.85, + "fp32": 422.11, "bf16": "" } ], @@ -7758,7 +7793,7 @@ "Precisions": [ { "int4": "", - "int8": 1.33, + "int8": 4.91, "fp16": "", "fp32": "", "bf16": "" @@ -7770,19 +7805,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "yolo11", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": false, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 10.35, "fp16": "", - "fp32": 122.62, + "fp32": 2.43, "bf16": "" } ], @@ -7793,7 +7828,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 226.02, "fp16": "", "fp32": "", "bf16": "" @@ -7805,19 +7840,19 @@ } }, { - "Platform": "Intel® Core™ i5-13600K CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 266.57, + "int8": 16483.42, "fp16": "", - "fp32": 102.14, + "fp32": 5183.13, "bf16": "" } ], @@ -7828,7 +7863,7 @@ "Precisions": [ { "int4": "", - "int8": 5.27, + "int8": 0.6, "fp16": "", "fp32": "", "bf16": "" @@ -7840,19 +7875,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "bert-base-cased", 
+ "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 84.71, + "int8": 3360.26, "fp16": "", - "fp32": 51.06, + "fp32": 826.72, "bf16": "" } ], @@ -7863,7 +7898,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.35, "fp16": "", "fp32": "", "bf16": "" @@ -7875,19 +7910,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "efficientdet-d0", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 98.02, + "int8": 60.79, "fp16": "", - "fp32": 65.51, + "fp32": 15.05, "bf16": "" } ], @@ -7898,7 +7933,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 36.9, "fp16": "", "fp32": "", "bf16": "" @@ -7910,19 +7945,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1.16, + "int8": 6964.97, "fp16": "", - "fp32": 0.64, + "fp32": 1758.94, "bf16": "" } ], @@ -7933,7 +7968,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.77, "fp16": "", "fp32": "", "bf16": "" @@ -7945,19 +7980,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + 
"whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1353.32, + "int8": "", "fp16": "", - "fp32": 683.15, + "fp32": 570.41, "bf16": "" } ], @@ -7980,19 +8015,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 365.63, + "int8": 1225.71, "fp16": "", - "fp32": 164.12, + "fp32": 494.95, "bf16": "" } ], @@ -8003,7 +8038,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.02, "fp16": "", "fp32": "", "bf16": "" @@ -8015,19 +8050,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 8.65, + "int8": 590.91, "fp16": "", - "fp32": 3.77, + "fp32": 225.85, "bf16": "" } ], @@ -8038,7 +8073,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 9.06, "fp16": "", "fp32": "", "bf16": "" @@ -8050,19 +8085,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 657.26, + "int8": 583.69, "fp16": "", - "fp32": 293.93, + "fp32": 344.05, "bf16": "" } ], @@ -8073,7 +8108,7 @@ "Precisions": [ { "int4": "", - "int8": "", + 
"int8": 6.88, "fp16": "", "fp32": "", "bf16": "" @@ -8085,19 +8120,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "yolo11", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": false, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 8.61, "fp16": "", - "fp32": 107.24, + "fp32": 2.26, "bf16": "" } ], @@ -8108,7 +8143,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 251.68, "fp16": "", "fp32": "", "bf16": "" @@ -8120,19 +8155,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 182.9, + "int8": 14974.1, "fp16": "", - "fp32": 101.97, + "fp32": 4631.52, "bf16": "" } ], @@ -8143,7 +8178,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.93, "fp16": "", "fp32": "", "bf16": "" @@ -8155,19 +8190,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 50.21, + "int8": 2951.09, "fp16": "", - "fp32": 18.33, + "fp32": 758.44, "bf16": "" } ], @@ -8178,7 +8213,7 @@ "Precisions": [ { "int4": "", - "int8": 22.66, + "int8": 1.58, "fp16": "", "fp32": "", "bf16": "" @@ -8190,19 +8225,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": 
"efficientdet-d0", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 71.27, + "int8": 57.9, "fp16": "", - "fp32": 41.39, + "fp32": 14.99, "bf16": "" } ], @@ -8213,7 +8248,7 @@ "Precisions": [ { "int4": "", - "int8": 14.62, + "int8": 37.3, "fp16": "", "fp32": "", "bf16": "" @@ -8225,19 +8260,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.71, + "int8": 6130.79, "fp16": "", - "fp32": 0.19, + "fp32": 1659.32, "bf16": "" } ], @@ -8248,7 +8283,7 @@ "Precisions": [ { "int4": "", - "int8": 1361.21, + "int8": 1.19, "fp16": "", "fp32": "", "bf16": "" @@ -8260,19 +8295,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1291.06, + "int8": "", "fp16": "", - "fp32": 507.09, + "fp32": 512.68, "bf16": "" } ], @@ -8283,7 +8318,7 @@ "Precisions": [ { "int4": "", - "int8": 0.95, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -8295,19 +8330,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Platinum 8280 CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": 
false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 224.68, + "int8": 1008.45, "fp16": "", - "fp32": 60.81, + "fp32": 451.77, "bf16": "" } ], @@ -8318,7 +8353,7 @@ "Precisions": [ { "int4": "", - "int8": 4.95, + "int8": 3.6, "fp16": "", "fp32": "", "bf16": "" @@ -8330,19 +8365,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3.84, + "int8": 877.5, "fp16": "", - "fp32": 1.01, + "fp32": 337.07, "bf16": "" } ], @@ -8353,7 +8388,7 @@ "Precisions": [ { "int4": "", - "int8": 250.45, + "int8": 5.18, "fp16": "", "fp32": "", "bf16": "" @@ -8365,19 +8400,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 491.99, + "int8": 1004.95, "fp16": "", - "fp32": 146.3, + "fp32": 577.33, "bf16": "" } ], @@ -8388,7 +8423,7 @@ "Precisions": [ { "int4": "", - "int8": 2.2, + "int8": 4.33, "fp16": "", "fp32": "", "bf16": "" @@ -8400,31 +8435,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "yolo11", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "gemma-2-9b", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - 
"int4": "", - "int8": "", - "fp16": "", - "fp32": 48.0, + "int4": 20.93, + "int8": 14.24, + "fp16": 7.69, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 47.77, + "int8": 70.2, + "fp16": 130.02, "fp32": "", "bf16": "" } @@ -8435,31 +8470,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "glm-4-9b-chat", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "whats_new_model": "false", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 106.45, - "fp16": "", - "fp32": 40.14, + "int4": 23.5, + "int8": 15.51, + "fp16": 8.49, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 10.2, - "fp16": "", + "int4": 42.54, + "int8": 64.45, + "fp16": 117.72, "fp32": "", "bf16": "" } @@ -8470,31 +8505,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "llama-2-7b-chat", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 68.4, - "fp16": 53.22, + "int4": 26.42, + "int8": 19.3, + "fp16": 10.54, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 17.09, - "fp16": "", + "int4": 37.84, + "int8": 51.81, + "fp16": 94.85, "fp32": "", "bf16": "" } @@ -8505,31 +8540,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "efficientdet-d0", + 
"Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "llama-3-8b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "whats_new_model": "false", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 91.46, - "fp16": 72.22, + "int4": 26.48, + "int8": 17.82, + "fp16": 9.62, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 17.92, - "fp16": "", + "int4": 37.76, + "int8": 56.1, + "fp16": 103.89, "fp32": "", "bf16": "" } @@ -8540,31 +8575,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "llama-3.2-3b-instruct", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 0.82, - "fp16": 0.88, + "int4": 49.95, + "int8": 34.9, + "fp16": 19.58, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1113.84, - "fp16": "", + "int4": 20.02, + "int8": 28.65, + "fp16": 51.05, "fp32": "", "bf16": "" } @@ -8575,19 +8610,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 729.72, - "fp16": 569.2, - "fp32": "", + "int8": 14.28, + "fp16": "", + "fp32": 3.4, "bf16": "" } ], @@ -8598,7 +8633,7 @@ "Precisions": [ { 
"int4": "", - "int8": 2.05, + "int8": 172.99, "fp16": "", "fp32": "", "bf16": "" @@ -8610,31 +8645,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "mistral-7b-v0.1", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 262.94, - "fp16": 174.98, + "int4": 27.2, + "int8": 18.7, + "fp16": 10.15, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 4.82, - "fp16": "", + "int4": 36.76, + "int8": 53.45, + "fp16": 98.43, "fp32": "", "bf16": "" } @@ -8645,19 +8680,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 8.29, - "fp16": 4.67, - "fp32": "", + "int8": 22593.46, + "fp16": "", + "fp32": 6937.81, "bf16": "" } ], @@ -8668,7 +8703,7 @@ "Precisions": [ { "int4": "", - "int8": 118.28, + "int8": 0.58, "fp16": "", "fp32": "", "bf16": "" @@ -8680,31 +8715,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "phi-3-mini-4k-instruct", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 447.59, - "fp16": 299.29, + "int4": 40.17, + "int8": 30.4, + "fp16": 17.19, "fp32": "", "bf16": "" } ], - 
"Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 3.33, - "fp16": "", + "int4": 24.89, + "int8": 32.89, + "fp16": 58.17, "fp32": "", "bf16": "" } @@ -8715,31 +8750,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185G7 iGPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "qwen2-7b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 161.26, - "fp16": 111.45, + "int4": 29.5, + "int8": 19.45, + "fp16": 10.14, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 8.1, - "fp16": "", + "int4": 33.89, + "int8": 51.39, + "fp16": 98.53, "fp32": "", "bf16": "" } @@ -8750,19 +8785,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "bert-base-cased", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 50.01, + "int8": 4874.83, "fp16": "", - "fp32": 25.82, + "fp32": 1145.82, "bf16": "" } ], @@ -8773,7 +8808,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.07, "fp16": "", "fp32": "", "bf16": "" @@ -8785,19 +8820,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "efficientdet-d0", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 57.69, + 
"int8": 84.49, "fp16": "", - "fp32": 28.41, + "fp32": 20.86, "bf16": "" } ], @@ -8808,7 +8843,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 26.86, "fp16": "", "fp32": "", "bf16": "" @@ -8820,19 +8855,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.69, + "int8": 10144.91, "fp16": "", - "fp32": "", + "fp32": 2524.18, "bf16": "" } ], @@ -8843,7 +8878,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.69, "fp16": "", "fp32": "", "bf16": "" @@ -8855,19 +8890,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "yolo11", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 958.94, + "int8": "", "fp16": "", - "fp32": 350.53, + "fp32": 803.23, "bf16": "" } ], @@ -8890,19 +8925,19 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Platinum 8380 CPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 230.4, + "int8": 1701.66, "fp16": "", - "fp32": 85.03, + "fp32": 696.4, "bf16": "" } ], @@ -8913,7 +8948,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.35, "fp16": "", "fp32": "", "bf16": "" @@ -8925,20 +8960,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE 
CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "bert-base-cased", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 4.44, + "int8": 3014.41, "fp16": "", - "fp32": 1.75, - "bf16": "" + "fp32": 482.59, + "bf16": 1959.08 } ], "Unit": "FPS", @@ -8948,10 +8983,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.76, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 4.83 } ], "Unit": "ms", @@ -8960,20 +8995,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 456.16, + "int8": 1451.22, "fp16": "", - "fp32": 162.16, - "bf16": "" + "fp32": 859.98, + "bf16": 1016.97 } ], "Unit": "FPS", @@ -8983,10 +9018,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 4.76, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 5.09 } ], "Unit": "ms", @@ -8995,31 +9030,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "gemma-2-9b", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 55.98, + "int4": 22.73, + "int8": 16.68, + "fp16": 10.79, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, 
"latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 43.98, + "int8": 59.92, + "fp16": 92.62, "fp32": "", "bf16": "" } @@ -9030,31 +9065,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", - "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "glm-4-9b-chat", + "featured_SKU": true, + "whats_new_model": "false", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 103.63, - "fp16": "", - "fp32": 53.56, + "int4": 23.18, + "int8": 16.86, + "fp16": 11.29, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 43.14, + "int8": 59.31, + "fp16": 88.5, "fp32": "", "bf16": "" } @@ -9065,31 +9100,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "bert-base-cased", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "llama-2-7b-chat", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 38.28, - "fp16": "", - "fp32": 13.87, + "int4": 28.22, + "int8": 20.58, + "fp16": 14.55, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 28.41, - "fp16": "", + "int4": 35.43, + "int8": 48.58, + "fp16": 68.7, "fp32": "", "bf16": "" } @@ -9100,31 +9135,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + 
"Model": "llama-3-8b", + "featured_SKU": true, + "whats_new_model": "false", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 53.34, - "fp16": "", - "fp32": 22.26, + "int4": 26.27, + "int8": 19.11, + "fp16": 13.32, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 20.12, - "fp16": "", + "int4": 38.06, + "int8": 52.31, + "fp16": 75.04, "fp32": "", "bf16": "" } @@ -9135,31 +9170,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "llama-3.2-3b-instruct", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 0.52, - "fp16": "", - "fp32": 0.14, + "int4": 48.35, + "int8": 38.34, + "fp16": 27.94, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1805.69, - "fp16": "", + "int4": 20.68, + "int8": 26.08, + "fp16": 35.79, "fp32": "", "bf16": "" } @@ -9170,20 +9205,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 972.25, + "int8": 61.27, "fp16": "", - "fp32": 311.82, - "bf16": "" + "fp32": 5.19, + "bf16": 37.47 } ], "Unit": "FPS", @@ -9193,10 +9228,10 @@ "Precisions": [ { "int4": "", - 
"int8": 1.2, + "int8": 60.02, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 81.99 } ], "Unit": "ms", @@ -9205,31 +9240,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "resnet-50", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "mistral-7b-v0.1", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 174.69, - "fp16": "", - "fp32": 45.52, + "int4": 28.78, + "int8": 20.01, + "fp16": 14.07, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 6.4, - "fp16": "", + "int4": 34.74, + "int8": 49.96, + "fp16": 71.05, "fp32": "", "bf16": "" } @@ -9240,20 +9275,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.72, + "int8": 38249.27, "fp16": "", - "fp32": 0.78, - "bf16": "" + "fp32": 10231.48, + "bf16": 25384.03 } ], "Unit": "FPS", @@ -9263,10 +9298,10 @@ "Precisions": [ { "int4": "", - "int8": 335.04, + "int8": 0.66, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 0.67 } ], "Unit": "ms", @@ -9275,31 +9310,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "phi-3-mini-4k-instruct", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", 
"Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 386.67, - "fp16": "", - "fp32": 99.8, + "int4": 42.19, + "int8": 35.39, + "fp16": 23.71, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 2.82, - "fp16": "", + "int4": 23.7, + "int8": 28.25, + "fp16": 42.17, "fp32": "", "bf16": "" } @@ -9310,206 +9345,31 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 32.19, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1185GRE CPU-only", - "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 76.54, - "fp16": "", - "fp32": 27.6, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 13.2, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", - "Model": "bert-base-cased", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 45.77, - "fp16": 40.93, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 21.21, - "fp16": 
"", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 56.2, - "fp16": 41.8, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 23.38, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 0.56, - "fp16": 0.54, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 1606.31, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "qwen2-7b", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 648.66, - "fp16": 431.47, + "int4": 31.0, + "int8": 22.59, + "fp16": 14.26, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1.76, - "fp16": "", + "int4": 32.25, + "int8": 44.25, + "fp16": 70.1, "fp32": "", "bf16": "" } @@ 
-9520,20 +9380,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "resnet-50", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 208.21, - "fp16": 122.24, - "fp32": "", - "bf16": "" + "int8": 19160.66, + "fp16": "", + "fp32": 1591.64, + "bf16": 7474.81 } ], "Unit": "FPS", @@ -9543,10 +9403,10 @@ "Precisions": [ { "int4": "", - "int8": 5.47, + "int8": 1.01, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 1.26 } ], "Unit": "ms", @@ -9555,20 +9415,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 5.71, - "fp16": 3.09, - "fp32": "", - "bf16": "" + "int8": 432.65, + "fp16": "", + "fp32": 30.54, + "bf16": 208.46 } ], "Unit": "FPS", @@ -9578,10 +9438,10 @@ "Precisions": [ { "int4": "", - "int8": 173.5, + "int8": 8.99, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 15.32 } ], "Unit": "ms", @@ -9590,55 +9450,20 @@ } }, { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 348.95, - "fp16": 224.45, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 3.56, + "int8": 24068.49, 
"fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1185GRE iGPU-only", - "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 113.89, - "fp16": 78.71, - "fp32": "", - "bf16": "" + "fp32": 3408.57, + "bf16": 12163.6 } ], "Unit": "FPS", @@ -9648,10 +9473,10 @@ "Precisions": [ { "int4": "", - "int8": 9.49, + "int8": 0.76, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 0.9 } ], "Unit": "ms", @@ -9660,20 +9485,20 @@ } }, { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "bert-base-cased", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", + "Model": "yolo11", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 111.58, + "int8": "", "fp16": "", - "fp32": 57.55, - "bf16": "" + "fp32": 1039.51, + "bf16": 2046.23 } ], "Unit": "FPS", @@ -9686,7 +9511,7 @@ "int8": "", "fp16": "", "fp32": "", - "bf16": "" + "bf16": 2.93 } ], "Unit": "ms", @@ -9695,895 +9520,20 @@ } }, { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "efficientdet-d0", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 141.13, - "fp16": "", - "fp32": 75.23, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", - 
"featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 1.63, - "fp16": "", - "fp32": 0.68, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 2287.47, - "fp16": "", - "fp32": 1150.08, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "resnet-50", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 532.56, - "fp16": "", - "fp32": 180.65, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 10.33, - "fp16": "", - "fp32": 3.81, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - 
"int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 1013.57, - "fp16": "", - "fp32": 403.5, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 133.88, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU+iGPU", - "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 268.57, - "fp16": "", - "fp32": 120.55, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "bert-base-cased", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - 
"Precisions": [ - { - "int4": "", - "int8": 87.88, - "fp16": "", - "fp32": 34.76, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 16.26, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 113.82, - "fp16": "", - "fp32": 62.45, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 11.46, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 1.27, - "fp16": "", - "fp32": 0.36, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 886.78, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 1982.75, - "fp16": "", - "fp32": 968.72, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 0.89, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is 
better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "resnet-50", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 429.58, - "fp16": "", - "fp32": 107.58, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 3.47, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 7.11, - "fp16": "", - "fp32": 1.96, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 159.25, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 854.13, - "fp16": "", - "fp32": 289.32, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 1.72, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 90.72, - "bf16": "" - } 
- ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": "", - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H CPU-only", - "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 206.32, - "fp16": "", - "fp32": 78.09, - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 6.49, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": "bert-base-cased", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 89.81, - "fp16": 69.99, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 12.71, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 128.07, - "fp16": 97.39, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 12.87, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": 
"mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 1.04, - "fp16": 1.15, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 972.87, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 1281.93, - "fp16": 912.69, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 1.08, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": "resnet-50", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 381.27, - "fp16": 226.42, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 3.22, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 10.47, - "fp16": 6.14, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - 
"latency": { - "Precisions": [ - { - "int4": "", - "int8": 100.17, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 744.92, - "fp16": 407.72, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 1.87, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": 215.67, - "fp16": 148.01, - "fp32": "", - "bf16": "" - } - ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 5.58, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "bert-base-cased", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 61.33, + "int8": 2379.54, "fp16": "", - "fp32": 32.27, - "bf16": "" + "fp32": 948.88, + "bf16": 2377.59 } ], "Unit": "FPS", @@ -10593,10 +9543,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.15, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 2.54 } ], "Unit": "ms", @@ -10605,20 +9555,20 @@ } }, { - "Platform": 
"Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "efficientdet-d0", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "bert-base-cased", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 88.48, + "int8": 4674.83, "fp16": "", - "fp32": 59.03, - "bf16": "" + "fp32": 560.52, + "bf16": 3250.44 } ], "Unit": "FPS", @@ -10628,10 +9578,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.63, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 4.47 } ], "Unit": "ms", @@ -10640,20 +9590,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 0.81, + "int8": 1730.17, "fp16": "", - "fp32": 0.43, - "bf16": "" + "fp32": 1134.1, + "bf16": 1410.09 } ], "Unit": "FPS", @@ -10663,10 +9613,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 4.81, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 4.6 } ], "Unit": "ms", @@ -10675,31 +9625,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "gemma-2-9b", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 1218.37, - "fp16": "", - "fp32": 644.91, + "int4": 26.7, + "int8": 19.39, + "fp16": 12.28, + "fp32": "", "bf16": "" } ], - "Unit": 
"FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 37.45, + "int8": 51.57, + "fp16": 81.41, "fp32": "", "bf16": "" } @@ -10710,31 +9660,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "resnet-50", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "glm-4-9b-chat", + "featured_SKU": true, + "whats_new_model": "false", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 284.91, - "fp16": "", - "fp32": 109.93, + "int4": 27.89, + "int8": 19.7, + "fp16": 12.99, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 35.85, + "int8": 50.74, + "fp16": 76.94, "fp32": "", "bf16": "" } @@ -10745,31 +9695,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "llama-2-7b-chat", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 5.67, - "fp16": "", - "fp32": 2.15, + "int4": 34.45, + "int8": 24.47, + "fp16": 16.77, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 29.02, + "int8": 40.86, + "fp16": 59.6, "fp32": "", "bf16": "" } @@ -10780,31 +9730,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, - "whats_new_model": false, - 
"PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "llama-3-8b", + "featured_SKU": true, + "whats_new_model": "false", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 554.73, - "fp16": "", - "fp32": 228.8, + "int4": 31.52, + "int8": 22.17, + "fp16": 15.17, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 31.72, + "int8": 45.1, + "fp16": 65.89, "fp32": "", "bf16": "" } @@ -10815,31 +9765,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU+iGPU", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "llama-3.2-3b-instruct", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", - "fp32": 80.32, + "int4": 58.68, + "int8": 44.62, + "fp16": 32.87, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 17.04, + "int8": 22.41, + "fp16": 30.42, "fp32": "", "bf16": "" } @@ -10850,20 +9800,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", - "Model": "yolo_v8n", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU+iGPU", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 154.56, + "int8": 75.46, "fp16": "", - "fp32": 72.19, - "bf16": "" + "fp32": 6.42, + "bf16": 48.12 } 
], "Unit": "FPS", @@ -10873,10 +9823,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 55.8, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 73.33 } ], "Unit": "ms", @@ -10885,31 +9835,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", - "Model": "bert-base-cased", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "mistral-7b-v0.1", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 44.62, - "fp16": "", - "fp32": 17.96, + "int4": 33.88, + "int8": 23.05, + "fp16": 15.99, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 37.64, - "fp16": "", + "int4": 29.51, + "int8": 43.37, + "fp16": 62.53, "fp32": "", "bf16": "" } @@ -10920,20 +9870,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": false, + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "mobilenet-v2", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 61.85, + "int8": 39819.69, "fp16": "", - "fp32": 39.52, - "bf16": "" + "fp32": 15869.97, + "bf16": 29293.16 } ], "Unit": "FPS", @@ -10943,10 +9893,10 @@ "Precisions": [ { "int4": "", - "int8": 26.95, + "int8": 0.66, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 0.67 } ], "Unit": "ms", @@ -10955,31 +9905,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": 
"phi-3-mini-4k-instruct", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 0.64, - "fp16": "", - "fp32": 0.17, + "int4": 50.96, + "int8": 41.58, + "fp16": 27.67, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1935.64, - "fp16": "", + "int4": 19.62, + "int8": 24.05, + "fp16": 36.14, "fp32": "", "bf16": "" } @@ -10990,31 +9940,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", + "Model": "qwen2-7b", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 1042.94, - "fp16": "", - "fp32": 515.99, + "int4": 37.77, + "int8": 26.53, + "fp16": 16.25, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1.61, - "fp16": "", + "int4": 26.47, + "int8": 37.69, + "fp16": 61.52, "fp32": "", "bf16": "" } @@ -11025,20 +9975,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "resnet-50", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 203.02, + "int8": 21640.96, "fp16": "", - "fp32": 59.12, - "bf16": "" + "fp32": 1998.64, + "bf16": 13585.61 } ], "Unit": "FPS", @@ -11048,10 +9998,10 @@ "Precisions": [ { "int4": "", - "int8": 9.0, + "int8": 1.0, "fp16": "", "fp32": "", - 
"bf16": "" + "bf16": 1.21 } ], "Unit": "ms", @@ -11060,20 +10010,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3.48, + "int8": 510.69, "fp16": "", - "fp32": 1.03, - "bf16": "" + "fp32": 35.18, + "bf16": 273.78 } ], "Unit": "FPS", @@ -11083,10 +10033,10 @@ "Precisions": [ { "int4": "", - "int8": 439.19, + "int8": 7.68, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 12.33 } ], "Unit": "ms", @@ -11095,20 +10045,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 422.9, + "int8": 26761.66, "fp16": "", - "fp32": 151.69, - "bf16": "" + "fp32": 4711.32, + "bf16": 16670.32 } ], "Unit": "FPS", @@ -11118,10 +10068,10 @@ "Precisions": [ { "int4": "", - "int8": 3.87, + "int8": 0.72, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 1.16 } ], "Unit": "ms", @@ -11130,11 +10080,11 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11142,8 +10092,8 @@ "int4": "", "int8": "", "fp16": "", - "fp32": 48.93, - "bf16": "" + "fp32": 1455.84, + "bf16": 2965.31 } ], "Unit": "FPS", @@ -11156,7 
+10106,7 @@ "int8": "", "fp16": "", "fp32": "", - "bf16": "" + "bf16": 3.11 } ], "Unit": "ms", @@ -11165,20 +10115,20 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor CPU-only", + "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "yolo_v8n", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 101.73, + "int8": 3045.87, "fp16": "", - "fp32": 40.76, - "bf16": "" + "fp32": 1259.07, + "bf16": 3431.21 } ], "Unit": "FPS", @@ -11188,10 +10138,10 @@ "Precisions": [ { "int4": "", - "int8": 16.99, + "int8": 3.06, "fp16": "", "fp32": "", - "bf16": "" + "bf16": 2.55 } ], "Unit": "ms", @@ -11200,18 +10150,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "bert-base-cased", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 67.08, - "fp16": 52.9, + "int8": 314.28, + "fp16": 348.05, "fp32": "", "bf16": "" } @@ -11223,7 +10173,7 @@ "Precisions": [ { "int4": "", - "int8": 14.38, + "int8": 4.87, "fp16": "", "fp32": "", "bf16": "" @@ -11235,18 +10185,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "efficientdet-d0", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 98.8, - "fp16": 73.53, + "int8": 329.65, + "fp16": 284.77, "fp32": "", "bf16": "" } @@ -11258,7 +10208,7 @@ "Precisions": [ { "int4": "", - "int8": 13.41, + "int8": 5.42, "fp16": "", "fp32": "", 
"bf16": "" @@ -11270,30 +10220,30 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "gemma-2-9b", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 6.21, - "int8": 3.88, + "int4": 20.28, + "int8": 17.7, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 160.82, - "int8": 257.32, + "int4": 49.3, + "int8": 56.48, "fp16": "", "fp32": "", "bf16": "" @@ -11305,30 +10255,30 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "glm-4-9b-chat", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": "false", - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 7.25, - "int8": 4.27, + "int4": 37.32, + "int8": 28.17, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 137.82, - "int8": 233.92, + "int4": 26.79, + "int8": 35.49, "fp16": "", "fp32": "", "bf16": "" @@ -11340,31 +10290,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "llama-2-7b-chat", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 8.53, - "int8": 5.74, - "fp16": "", + "int4": 42.77, + "int8": 33.5, + "fp16": 22.41, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" 
}, "latency": { "Precisions": [ { - "int4": 117.18, - "int8": 174.01, - "fp16": "", + "int4": 23.38, + "int8": 29.85, + "fp16": 44.61, "fp32": "", "bf16": "" } @@ -11375,30 +10325,30 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "llama-3-8b", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": "false", - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 8.49, - "int8": 5.06, + "int4": 40.04, + "int8": 30.94, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 117.69, - "int8": 197.3, + "int4": 24.97, + "int8": 32.32, "fp16": "", "fp32": "", "bf16": "" @@ -11410,31 +10360,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "llama-3.2-3b-instruct", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 16.79, - "int8": 11.89, - "fp16": 6.7, + "int4": 56.52, + "int8": 52.46, + "fp16": 36.06, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 59.54, - "int8": 84.05, - "fp16": 149.13, + "int4": 17.69, + "int8": 19.06, + "fp16": 27.73, "fp32": "", "bf16": "" } @@ -11445,18 +10395,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { 
"Precisions": [ { "int4": "", - "int8": 0.73, - "fp16": 0.77, + "int8": 34.83, + "fp16": 19.44, "fp32": "", "bf16": "" } @@ -11468,7 +10418,7 @@ "Precisions": [ { "int4": "", - "int8": 1191.59, + "int8": 47.58, "fp16": "", "fp32": "", "bf16": "" @@ -11480,30 +10430,30 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "mistral-7b-v0.1", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 8.86, - "int8": 5.44, + "int4": 43.17, + "int8": 32.07, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 112.76, - "int8": 183.5, + "int4": 23.16, + "int8": 31.18, "fp16": "", "fp32": "", "bf16": "" @@ -11515,18 +10465,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "mobilenet-v2", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 869.88, - "fp16": 621.94, + "int8": 2380.28, + "fp16": 2106.51, "fp32": "", "bf16": "" } @@ -11538,7 +10488,7 @@ "Precisions": [ { "int4": "", - "int8": 1.38, + "int8": 1.18, "fp16": "", "fp32": "", "bf16": "" @@ -11550,31 +10500,31 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "phi-3-mini-4k-instruct", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 14.78, - "int8": 9.98, - "fp16": 
5.45, + "int4": 58.37, + "int8": 47.34, + "fp16": 29.17, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 67.65, - "int8": 100.19, - "fp16": 183.48, + "int4": 17.13, + "int8": 21.12, + "fp16": 34.28, "fp32": "", "bf16": "" } @@ -11585,30 +10535,30 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "qwen2-7b", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 9.11, - "int8": 5.39, + "int4": 41.51, + "int8": 33.85, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 109.74, - "int8": 185.49, + "int4": 24.09, + "int8": 29.54, "fp16": "", "fp32": "", "bf16": "" @@ -11620,18 +10570,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "resnet-50", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 277.06, - "fp16": 164.27, + "int8": 1407.56, + "fp16": 1035.07, "fp32": "", "bf16": "" } @@ -11643,7 +10593,7 @@ "Precisions": [ { "int4": "", - "int8": 3.85, + "int8": 1.44, "fp16": "", "fp32": "", "bf16": "" @@ -11655,18 +10605,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": 
{ "Precisions": [ { "int4": "", - "int8": 7.1, - "fp16": 3.99, + "int8": 112.36, + "fp16": 73.09, "fp32": "", "bf16": "" } @@ -11678,7 +10628,7 @@ "Precisions": [ { "int4": "", - "int8": 126.73, + "int8": 15.0, "fp16": "", "fp32": "", "bf16": "" @@ -11690,18 +10640,18 @@ } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 484.13, - "fp16": 298.47, + "int8": 1317.17, + "fp16": 1199.61, "fp32": "", "bf16": "" } @@ -11713,65 +10663,30 @@ "Precisions": [ { "int4": "", - "int8": 2.49, - "fp16": "", - "fp32": "", - "bf16": "" - } - ], - "Unit": "ms", - "UnitDesc": "lower is better" - } - } - }, - { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", - "Model": "stable-diffusion-v1-5", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", - "Parameters": { - "throughput": { - "Precisions": [ - { - "int4": "", - "int8": "", + "int8": 1.46, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", - "UnitDesc": "higher is better" - }, - "latency": { - "Precisions": [ - { - "int4": "", - "int8": 29.54, - "fp16": 29.97, - "fp32": "", - "bf16": "" - } - ], "Unit": "ms", "UnitDesc": "lower is better" } } }, { - "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Platform": "Intel® Arc™ A-Series Graphics dGPU", "Model": "yolo_v8n", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, iGPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 162.35, - "fp16": 106.83, + "int8": 516.71, + "fp16": 551.12, "fp32": "", "bf16": "" } @@ -11783,7 +10698,7 @@ "Precisions": [ { 
"int4": "", - "int8": 6.38, + "int8": 3.34, "fp16": "", "fp32": "", "bf16": "" @@ -11795,19 +10710,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "bert-base-cased", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 170.14, - "fp16": "", - "fp32": 67.07, + "int8": 166.8, + "fp16": 106.26, + "fp32": "", "bf16": "" } ], @@ -11818,7 +10733,7 @@ "Precisions": [ { "int4": "", - "int8": 10.73, + "int8": 6.48, "fp16": "", "fp32": "", "bf16": "" @@ -11830,19 +10745,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "efficientdet-d0", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 219.8, - "fp16": "", - "fp32": 126.91, + "int8": 200.77, + "fp16": 162.18, + "fp32": "", "bf16": "" } ], @@ -11853,7 +10768,7 @@ "Precisions": [ { "int4": "", - "int8": 7.34, + "int8": 8.23, "fp16": "", "fp32": "", "bf16": "" @@ -11865,31 +10780,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "gemma-2-9b", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 9.43, - "int8": 6.9, - "fp16": 3.59, + "int4": 11.16, + "int8": "", + "fp16": 0.9, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 106.06, - "int8": 144.96, - 
"fp16": 278.42, + "int4": 89.57, + "int8": "", + "fp16": 1105.2, "fp32": "", "bf16": "" } @@ -11900,31 +10815,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "glm-4-9b-chat", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 10.66, - "int8": 7.47, - "fp16": 3.84, + "int4": 13.65, + "int8": "", + "fp16": 1.17, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 93.82, - "int8": 133.88, - "fp16": 260.67, + "int4": 73.24, + "int8": "", + "fp16": 849.49, "fp32": "", "bf16": "" } @@ -11935,31 +10850,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "llama-2-7b-chat", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 13.44, - "int8": 9.29, - "fp16": 4.94, + "int4": 15.55, + "int8": 10.59, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 74.39, - "int8": 107.62, - "fp16": 202.32, + "int4": 64.29, + "int8": 94.35, + "fp16": "", "fp32": "", "bf16": "" } @@ -11970,31 +10885,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "llama-3-8b", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 11.91, - "int8": 
8.66, - "fp16": 4.81, + "int4": 15.53, + "int8": 8.75, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 83.93, - "int8": 115.49, - "fp16": 223.15, + "int4": 64.39, + "int8": 114.23, + "fp16": "", "fp32": "", "bf16": "" } @@ -12005,31 +10920,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "llama-3.2-3b-instruct", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 25.41, - "int8": 19.0, - "fp16": 10.18, + "int4": 29.61, + "int8": 20.65, + "fp16": 12.31, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 39.36, - "int8": 52.64, - "fp16": 98.24, + "int4": 33.77, + "int8": 48.42, + "fp16": 81.22, "fp32": "", "bf16": "" } @@ -12040,19 +10955,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.49, - "fp16": "", - "fp32": 0.71, + "int8": 2.26, + "fp16": 1.57, + "fp32": "", "bf16": "" } ], @@ -12063,7 +10978,7 @@ "Precisions": [ { "int4": "", - "int8": 562.6, + "int8": 422.52, "fp16": "", "fp32": "", "bf16": "" @@ -12075,31 +10990,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "mistral-7b-v0.1", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - 
"PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 14.41, - "int8": 9.13, - "fp16": 4.72, + "int4": 15.86, + "int8": 10.16, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 69.4, - "int8": 109.54, - "fp16": 211.92, + "int4": 63.02, + "int8": 98.38, + "fp16": "", "fp32": "", "bf16": "" } @@ -12110,19 +11025,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "mobilenet-v2", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 4239.14, - "fp16": "", - "fp32": 2047.2, + "int8": 1303.1, + "fp16": 1365.49, + "fp32": "", "bf16": "" } ], @@ -12133,7 +11048,7 @@ "Precisions": [ { "int4": "", - "int8": 0.6, + "int8": 1.14, "fp16": "", "fp32": "", "bf16": "" @@ -12145,31 +11060,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "phi-3-mini-4k-instruct", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 15.66, - "fp16": 8.52, + "int4": 25.54, + "int8": 18.45, + "fp16": 10.44, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 63.85, - "fp16": 117.37, + "int4": 39.15, + "int8": 54.19, + "fp16": 95.78, "fp32": "", "bf16": "" } @@ -12180,31 +11095,31 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ 
Ultra 7 processor 155H iGPU-only", "Model": "qwen2-7b", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 13.1, - "int8": 9.24, - "fp16": 4.75, + "int4": 17.1, + "int8": 9.68, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 63.85, - "fp16": 117.37, + "int4": 58.45, + "int8": 103.26, + "fp16": "", "fp32": "", "bf16": "" } @@ -12215,19 +11130,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "resnet-50", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 762.32, - "fp16": "", - "fp32": 234.53, + "int8": 556.12, + "fp16": 402.2, + "fp32": "", "bf16": "" } ], @@ -12238,7 +11153,7 @@ "Precisions": [ { "int4": "", - "int8": 2.17, + "int8": 2.38, "fp16": "", "fp32": "", "bf16": "" @@ -12250,19 +11165,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "ssd-resnet34-1200", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 12.97, - "fp16": "", - "fp32": 3.84, + "int8": 20.62, + "fp16": 12.55, + "fp32": "", "bf16": "" } ], @@ -12273,7 +11188,7 @@ "Precisions": [ { "int4": "", - "int8": 102.02, + "int8": 47.68, "fp16": "", "fp32": "", "bf16": "" @@ -12285,19 +11200,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", + "Platform": "Intel® Core™ 
Ultra 7 processor 155H iGPU-only", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1606.89, - "fp16": "", - "fp32": 589.62, + "int8": 1027.05, + "fp16": 803.37, + "fp32": "", "bf16": "" } ], @@ -12308,7 +11223,7 @@ "Precisions": [ { "int4": "", - "int8": 1.08, + "int8": 1.45, "fp16": "", "fp32": "", "bf16": "" @@ -12320,11 +11235,11 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", - "Model": "stable-diffusion-v1-5", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", + "Model": "yolo11", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12332,7 +11247,7 @@ "int4": "", "int8": "", "fp16": "", - "fp32": "", + "fp32": 299.66, "bf16": "" } ], @@ -12343,8 +11258,8 @@ "Precisions": [ { "int4": "", - "int8": 40.27, - "fp16": 39.61, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -12355,19 +11270,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Core™, CPU-only", + "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", + "Model": "yolo_v8n", + "featured_SKU": true, + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 187.66, + "int8": 397.54, + "fp16": 297.68, + "fp32": "", "bf16": "" } ], @@ -12378,7 +11293,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.13, "fp16": "", "fp32": "", "bf16": "" @@ -12390,19 +11305,19 @@ } }, { - "Platform": "Intel® Core™ i9-13900K CPU-only", - "Model": 
"yolo_v8n", - "featured_SKU": false, + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", + "Model": "bert-base-cased", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Core™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 389.04, - "fp16": "", - "fp32": 154.4, + "int8": 259.68, + "fp16": 271.46, + "fp32": 262.33, "bf16": "" } ], @@ -12413,7 +11328,7 @@ "Precisions": [ { "int4": "", - "int8": 4.13, + "int8": 5.05, "fp16": "", "fp32": "", "bf16": "" @@ -12425,19 +11340,19 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", - "Model": "bert-base-cased", - "featured_SKU": "false", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", + "Model": "efficientdet-d0", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 385.87, - "fp16": 420.99, - "fp32": "", + "int8": 167.07, + "fp16": 174.75, + "fp32": 178.07, "bf16": "" } ], @@ -12448,7 +11363,7 @@ "Precisions": [ { "int4": "", - "int8": 2.99, + "int8": 8.26, "fp16": "", "fp32": "", "bf16": "" @@ -12460,18 +11375,18 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", - "Model": "efficientdet-d0", - "featured_SKU": "false", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", + "Model": "falcon-7b-instruct", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 426.56, - "fp16": 362.73, + "int4": 17.7, + "int8": 10.82, + "fp16": 6.02, "fp32": "", "bf16": "" } @@ -12482,9 +11397,9 @@ "latency": { "Precisions": [ { - "int4": "", - "int8": 2.8, - "fp16": "", + "int4": 56.49, + "int8": 92.37, + "fp16": 166.08, "fp32": "", "bf16": "" } @@ -12495,30 +11410,30 
@@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "gemma-2-9b", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 22.66, - "int8": 18.13, + "int4": 13.03, + "int8": 7.64, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 44.13, - "int8": 55.13, + "int4": 76.74, + "int8": 130.86, "fp16": "", "fp32": "", "bf16": "" @@ -12530,30 +11445,30 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "glm-4-9b-chat", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": "false", - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 40.04, - "int8": 26.95, + "int4": 15.23, + "int8": 8.11, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 24.97, - "int8": 37.1, + "int4": 65.65, + "int8": 123.23, "fp16": "", "fp32": "", "bf16": "" @@ -12565,31 +11480,31 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "llama-2-7b-chat", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 45.22, - "int8": 33.88, - "fp16": 21.45, + "int4": 16.8, + "int8": 10.77, + "fp16": 4.57, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, 
"latency": { "Precisions": [ { - "int4": 22.11, - "int8": 29.51, - "fp16": 46.62, + "int4": 59.5, + "int8": 92.8, + "fp16": 218.59, "fp32": "", "bf16": "" } @@ -12600,30 +11515,30 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "llama-3-8b", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": "false", - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 45.55, - "int8": 30.8, + "int4": 17.36, + "int8": 9.55, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 21.95, - "int8": 32.46, + "int4": 57.58, + "int8": 104.65, "fp16": "", "fp32": "", "bf16": "" @@ -12635,31 +11550,31 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "llama-3.2-3b-instruct", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 69.44, - "int8": 57.9, - "fp16": 37.69, + "int4": 31.23, + "int8": 20.44, + "fp16": 11.56, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 14.4, - "int8": 17.27, - "fp16": 26.53, + "int4": 32.02, + "int8": 48.9, + "fp16": 86.45, "fp32": "", "bf16": "" } @@ -12670,19 +11585,19 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", 
"Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 33.38, - "fp16": 19.04, - "fp32": "", + "int8": 12.33, + "fp16": 7.29, + "fp32": 7.29, "bf16": "" } ], @@ -12693,7 +11608,7 @@ "Precisions": [ { "int4": "", - "int8": 48.67, + "int8": 91.66, "fp16": "", "fp32": "", "bf16": "" @@ -12705,31 +11620,31 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "mistral-7b-v0.1", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 45.53, - "int8": 32.37, - "fp16": 20.21, + "int4": 17.13, + "int8": 10.23, + "fp16": 4.99, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 21.96, - "int8": 30.89, - "fp16": 49.48, + "int4": 58.35, + "int8": 97.71, + "fp16": 200.07, "fp32": "", "bf16": "" } @@ -12740,19 +11655,19 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "mobilenet-v2", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3134.27, - "fp16": 3004.5, - "fp32": "", + "int8": 941.33, + "fp16": 970.79, + "fp32": 1078.52, "bf16": "" } ], @@ -12763,7 +11678,7 @@ "Precisions": [ { "int4": "", - "int8": 0.57, + "int8": 1.3, "fp16": "", "fp32": "", "bf16": "" @@ -12775,31 +11690,31 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "phi-3-mini-4k-instruct", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Accelerator 
Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 69.93, - "int8": 51.51, - "fp16": 32.84, + "int4": 26.93, + "int8": 17.85, + "fp16": 10.16, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 14.3, - "int8": 19.41, - "fp16": 30.45, + "int4": 37.13, + "int8": 56.01, + "fp16": 98.33, "fp32": "", "bf16": "" } @@ -12810,31 +11725,31 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "qwen2-7b", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 45.8, - "int8": 32.78, - "fp16": "", + "int4": 18.11, + "int8": 10.86, + "fp16": 5.1, "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 21.83, - "int8": 30.5, - "fp16": "", + "int4": 55.21, + "int8": 92.08, + "fp16": 195.84, "fp32": "", "bf16": "" } @@ -12845,19 +11760,19 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "resnet-50", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1921.18, - "fp16": 1329.28, - "fp32": "", + "int8": 856.81, + "fp16": 591.62, + "fp32": 580.46, "bf16": "" } ], @@ -12868,7 +11783,7 @@ "Precisions": [ { "int4": "", - "int8": 0.78, + "int8": 1.56, "fp16": "", "fp32": "", "bf16": "" @@ -12880,19 +11795,19 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V 
iGPU-only", "Model": "ssd-resnet34-1200", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 133.77, - "fp16": "", - "fp32": "", + "int8": 64.32, + "fp16": 38.26, + "fp32": 38.09, "bf16": "" } ], @@ -12903,7 +11818,7 @@ "Precisions": [ { "int4": "", - "int8": 13.93, + "int8": 20.89, "fp16": "", "fp32": "", "bf16": "" @@ -12915,19 +11830,19 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2200.83, - "fp16": 1665.15, - "fp32": "", + "int8": 836.44, + "fp16": 775.11, + "fp32": 866.31, "bf16": "" } ], @@ -12938,7 +11853,7 @@ "Precisions": [ { "int4": "", - "int8": 0.78, + "int8": 1.85, "fp16": "", "fp32": "", "bf16": "" @@ -12950,11 +11865,11 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", - "Model": "stable-diffusion-v1-5", - "featured_SKU": "false", - "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", + "Model": "yolo11", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12962,7 +11877,7 @@ "int4": "", "int8": "", "fp16": "", - "fp32": "", + "fp32": 381.29, "bf16": "" } ], @@ -12973,8 +11888,8 @@ "Precisions": [ { "int4": "", - "int8": 2.33, - "fp16": 2.36, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -12985,18 +11900,53 @@ } }, { - "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Platform": "Intel® Core™ Ultra 9 processor 288V 
iGPU-only", "Model": "yolo_v8n", - "featured_SKU": "false", + "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Accelerator Platforms", + "PlatformType": "Intel® Core™, iGPU-only", + "Parameters": { + "throughput": { + "Precisions": [ + { + "int4": "", + "int8": 395.69, + "fp16": 373.09, + "fp32": 372.46, + "bf16": "" + } + ], + "Unit": "FPS", + "UnitDesc": "higher is better" + }, + "latency": { + "Precisions": [ + { + "int4": "", + "int8": 3.12, + "fp16": "", + "fp32": "", + "bf16": "" + } + ], + "Unit": "ms", + "UnitDesc": "lower is better" + } + } + }, + { + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", + "Model": "bert-base-cased", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 759.93, - "fp16": 694.57, + "int8": 44.81, + "fp16": 37.44, "fp32": "", "bf16": "" } @@ -13008,7 +11958,7 @@ "Precisions": [ { "int4": "", - "int8": 1.96, + "int8": 19.86, "fp16": "", "fp32": "", "bf16": "" @@ -13020,19 +11970,54 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, iGPU-only", + "Parameters": { + "throughput": { + "Precisions": [ + { + "int4": "", + "int8": 65.88, + "fp16": 49.29, + "fp32": "", + "bf16": "" + } + ], + "Unit": "FPS", + "UnitDesc": "higher is better" + }, + "latency": { + "Precisions": [ + { + "int4": "", + "int8": 19.48, + "fp16": "", + "fp32": "", + "bf16": "" + } + ], + "Unit": "ms", + "UnitDesc": "lower is better" + } + } + }, + { + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": false, + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", 
- "int8": 36.93, + "int8": 0.5, "fp16": "", - "fp32": 27.64, + "fp32": "", "bf16": "" } ], @@ -13055,19 +12040,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 484.32, - "fp16": "", - "fp32": 278.4, + "int8": 749.72, + "fp16": 471.55, + "fp32": "", "bf16": "" } ], @@ -13078,7 +12063,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.65, "fp16": "", "fp32": "", "bf16": "" @@ -13090,19 +12075,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 112.23, - "fp16": "", - "fp32": 42.14, + "int8": 202.55, + "fp16": 115.74, + "fp32": "", "bf16": "" } ], @@ -13113,7 +12098,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 4.94, "fp16": "", "fp32": "", "bf16": "" @@ -13125,19 +12110,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2.04, - "fp16": "", - "fp32": 0.6, + "int8": 5.54, + "fp16": 2.64, + "fp32": "", "bf16": "" } ], @@ -13148,7 +12133,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 173.03, "fp16": "", "fp32": "", "bf16": "" @@ -13160,19 +12145,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", + "Platform": "Intel® Core™ 
i5-1235U Processor iGPU-only", "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 216.96, - "fp16": "", - "fp32": 94.92, + "int8": 372.94, + "fp16": 217.42, + "fp32": "", "bf16": "" } ], @@ -13183,7 +12168,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.1, "fp16": "", "fp32": "", "bf16": "" @@ -13195,19 +12180,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", - "Model": "yolo11", + "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", + "Model": "yolo_v8n", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU+iGPU", + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 34.52, + "int8": 122.89, + "fp16": 76.56, + "fp32": "", "bf16": "" } ], @@ -13218,7 +12203,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 8.19, "fp16": "", "fp32": "", "bf16": "" @@ -13230,19 +12215,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU+iGPU", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU+iGPU", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 61.06, - "fp16": "", - "fp32": 28.61, + "int8": 47.22, + "fp16": 39.62, + "fp32": "", "bf16": "" } ], @@ -13253,7 +12238,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 18.47, "fp16": "", "fp32": "", "bf16": "" @@ -13265,19 +12250,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - 
"PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 15.44, - "fp16": "", - "fp32": 12.75, + "int8": 80.29, + "fp16": 59.69, + "fp32": "", "bf16": "" } ], @@ -13288,7 +12273,7 @@ "Precisions": [ { "int4": "", - "int8": 66.23, + "int8": 14.57, "fp16": "", "fp32": "", "bf16": "" @@ -13300,19 +12285,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 296.53, + "int8": 0.53, "fp16": "", - "fp32": 183.3, + "fp32": "", "bf16": "" } ], @@ -13323,7 +12308,7 @@ "Precisions": [ { "int4": "", - "int8": 3.8, + "int8": 1510.69, "fp16": "", "fp32": "", "bf16": "" @@ -13335,19 +12320,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 48.77, - "fp16": "", - "fp32": 20.13, + "int8": 771.99, + "fp16": 512.35, + "fp32": "", "bf16": "" } ], @@ -13358,7 +12343,7 @@ "Precisions": [ { "int4": "", - "int8": 21.88, + "int8": 1.49, "fp16": "", "fp32": "", "bf16": "" @@ -13370,19 +12355,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { 
"throughput": { "Precisions": [ { "int4": "", - "int8": 0.82, - "fp16": "", - "fp32": 0.31, + "int8": 223.26, + "fp16": 127.1, + "fp32": "", "bf16": "" } ], @@ -13393,7 +12378,7 @@ "Precisions": [ { "int4": "", - "int8": 1224.62, + "int8": 4.32, "fp16": "", "fp32": "", "bf16": "" @@ -13405,19 +12390,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 106.12, - "fp16": "", - "fp32": 49.52, + "int8": 5.8, + "fp16": 2.85, + "fp32": "", "bf16": "" } ], @@ -13428,7 +12413,7 @@ "Precisions": [ { "int4": "", - "int8": 9.72, + "int8": 144.65, "fp16": "", "fp32": "", "bf16": "" @@ -13440,19 +12425,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", - "Model": "yolo11", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Atom™, CPU-only", + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 15.36, + "int8": 407.72, + "fp16": 234.08, + "fp32": "", "bf16": "" } ], @@ -13463,7 +12448,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 2.76, "fp16": "", "fp32": "", "bf16": "" @@ -13475,19 +12460,19 @@ } }, { - "Platform": "Intel® Processor N100 CPU-only", + "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 23.65, - "fp16": "", - "fp32": 12.86, + "int8": 
130.6, + "fp16": 82.83, + "fp32": "", "bf16": "" } ], @@ -13498,7 +12483,7 @@ "Precisions": [ { "int4": "", - "int8": 43.43, + "int8": 7.1, "fp16": "", "fp32": "", "bf16": "" @@ -13510,18 +12495,18 @@ } }, { - "Platform": "Intel® Processor N100 iGPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 33.69, - "fp16": 30.91, + "int8": 68.08, + "fp16": 53.55, "fp32": "", "bf16": "" } @@ -13533,7 +12518,7 @@ "Precisions": [ { "int4": "", - "int8": 38.02, + "int8": 17.09, "fp16": "", "fp32": "", "bf16": "" @@ -13545,18 +12530,18 @@ } }, { - "Platform": "Intel® Processor N100 iGPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 337.95, - "fp16": 267.38, + "int8": 91.72, + "fp16": 72.17, "fp32": "", "bf16": "" } @@ -13568,7 +12553,7 @@ "Precisions": [ { "int4": "", - "int8": 3.84, + "int8": 18.1, "fp16": "", "fp32": "", "bf16": "" @@ -13580,18 +12565,18 @@ } }, { - "Platform": "Intel® Processor N100 iGPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 81.72, - "fp16": 49.76, + "int8": 0.82, + "fp16": "", "fp32": "", "bf16": "" } @@ -13603,7 +12588,7 @@ "Precisions": [ { "int4": "", - "int8": 13.15, + "int8": 1130.75, "fp16": "", "fp32": "", 
"bf16": "" @@ -13615,18 +12600,18 @@ } }, { - "Platform": "Intel® Processor N100 iGPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1.62, - "fp16": 1.01, + "int8": 720.79, + "fp16": 566.9, "fp32": "", "bf16": "" } @@ -13638,7 +12623,7 @@ "Precisions": [ { "int4": "", - "int8": 622.97, + "int8": 2.08, "fp16": "", "fp32": "", "bf16": "" @@ -13650,18 +12635,18 @@ } }, { - "Platform": "Intel® Processor N100 iGPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 164.31, - "fp16": 106.85, + "int8": 265.78, + "fp16": 174.38, "fp32": "", "bf16": "" } @@ -13673,7 +12658,7 @@ "Precisions": [ { "int4": "", - "int8": 7.35, + "int8": 4.84, "fp16": "", "fp32": "", "bf16": "" @@ -13685,18 +12670,18 @@ } }, { - "Platform": "Intel® Processor N100 iGPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Atom™, iGPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 47.04, - "fp16": 34.97, + "int8": 8.24, + "fp16": 4.65, "fp32": "", "bf16": "" } @@ -13708,7 +12693,7 @@ "Precisions": [ { "int4": "", - "int8": 23.03, + "int8": 118.74, "fp16": "", "fp32": "", "bf16": "" @@ -13720,19 +12705,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + 
"Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 218.18, - "fp16": "", - "fp32": 80.36, + "int8": 455.85, + "fp16": 299.6, + "fp32": "", "bf16": "" } ], @@ -13743,7 +12728,7 @@ "Precisions": [ { "int4": "", - "int8": 14.4, + "int8": 3.33, "fp16": "", "fp32": "", "bf16": "" @@ -13755,19 +12740,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ i7-1185G7 iGPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 271.94, - "fp16": "", - "fp32": 167.25, + "int8": 160.91, + "fp16": 111.69, + "fp32": "", "bf16": "" } ], @@ -13778,7 +12763,7 @@ "Precisions": [ { "int4": "", - "int8": 11.07, + "int8": 8.16, "fp16": "", "fp32": "", "bf16": "" @@ -13790,19 +12775,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3.26, - "fp16": "", - "fp32": 0.9, + "int8": 47.65, + "fp16": 39.47, + "fp32": "", "bf16": "" } ], @@ -13813,7 +12798,7 @@ "Precisions": [ { "int4": "", - "int8": 637.88, + "int8": 21.58, "fp16": "", "fp32": "", "bf16": "" @@ -13825,19 +12810,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": 
"Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 5417.98, - "fp16": "", - "fp32": 1926.0, + "int8": 56.45, + "fp16": 41.79, + "fp32": "", "bf16": "" } ], @@ -13848,7 +12833,7 @@ "Precisions": [ { "int4": "", - "int8": 1.45, + "int8": 23.58, "fp16": "", "fp32": "", "bf16": "" @@ -13860,19 +12845,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 979.5, - "fp16": "", - "fp32": 267.16, + "int8": 0.54, + "fp16": 0.55, + "fp32": "", "bf16": "" } ], @@ -13883,7 +12868,7 @@ "Precisions": [ { "int4": "", - "int8": 3.06, + "int8": 1632.29, "fp16": "", "fp32": "", "bf16": "" @@ -13895,19 +12880,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 17.65, - "fp16": "", - "fp32": 4.58, + "int8": 625.04, + "fp16": 428.1, + "fp32": "", "bf16": "" } ], @@ -13918,7 +12903,7 @@ "Precisions": [ { "int4": "", - "int8": 116.19, + "int8": 1.79, "fp16": "", "fp32": "", "bf16": "" @@ -13930,19 +12915,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { 
"Precisions": [ { "int4": "", - "int8": 2104.85, - "fp16": "", - "fp32": 639.65, + "int8": 203.94, + "fp16": 116.2, + "fp32": "", "bf16": "" } ], @@ -13953,7 +12938,7 @@ "Precisions": [ { "int4": "", - "int8": 1.56, + "int8": 5.45, "fp16": "", "fp32": "", "bf16": "" @@ -13965,19 +12950,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "yolo11", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 206.18, + "int8": 5.47, + "fp16": 3.15, + "fp32": "", "bf16": "" } ], @@ -13988,7 +12973,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 169.33, "fp16": "", "fp32": "", "bf16": "" @@ -14000,19 +12985,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 5218T CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 440.56, - "fp16": "", - "fp32": 173.57, + "int8": 363.85, + "fp16": 227.22, + "fp32": "", "bf16": "" } ], @@ -14023,7 +13008,7 @@ "Precisions": [ { "int4": "", - "int8": 5.93, + "int8": 3.43, "fp16": "", "fp32": "", "bf16": "" @@ -14035,19 +13020,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Core™ i7-1185GRE iGPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 426.19, - "fp16": "", - "fp32": 162.63, + "int8": 112.45, 
+ "fp16": 76.19, + "fp32": "", "bf16": "" } ], @@ -14058,7 +13043,7 @@ "Precisions": [ { "int4": "", - "int8": 11.09, + "int8": 9.34, "fp16": "", "fp32": "", "bf16": "" @@ -14070,19 +13055,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 411.51, - "fp16": "", - "fp32": 254.65, + "int8": 90.29, + "fp16": 70.31, + "fp32": "", "bf16": "" } ], @@ -14093,7 +13078,7 @@ "Precisions": [ { "int4": "", - "int8": 8.51, + "int8": 12.83, "fp16": "", "fp32": "", "bf16": "" @@ -14105,19 +13090,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "efficientdet-d0", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 6.45, - "fp16": "", - "fp32": 1.65, + "int8": 128.75, + "fp16": 97.8, + "fp32": "", "bf16": "" } ], @@ -14128,7 +13113,7 @@ "Precisions": [ { "int4": "", - "int8": 321.85, + "int8": 12.86, "fp16": "", "fp32": "", "bf16": "" @@ -14140,19 +13125,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 10273.19, + "int8": 1.04, "fp16": "", - "fp32": 3342.96, + "fp32": "", "bf16": "" } ], @@ -14163,7 +13148,7 @@ "Precisions": [ { "int4": "", - 
"int8": 1.21, + "int8": 973.17, "fp16": "", "fp32": "", "bf16": "" @@ -14175,19 +13160,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2125.81, - "fp16": "", - "fp32": 570.61, + "int8": 1284.15, + "fp16": 902.47, + "fp32": "", "bf16": "" } ], @@ -14198,7 +13183,7 @@ "Precisions": [ { "int4": "", - "int8": 1.84, + "int8": 1.11, "fp16": "", "fp32": "", "bf16": "" @@ -14210,19 +13195,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 41.83, - "fp16": "", - "fp32": 10.91, + "int8": 384.22, + "fp16": 227.94, + "fp32": "", "bf16": "" } ], @@ -14233,7 +13218,7 @@ "Precisions": [ { "int4": "", - "int8": 49.53, + "int8": 3.29, "fp16": "", "fp32": "", "bf16": "" @@ -14245,19 +13230,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 4376.71, - "fp16": "", - "fp32": 1244.57, + "int8": 10.48, + "fp16": 6.14, + "fp32": "", "bf16": "" } ], @@ -14268,7 +13253,7 @@ "Precisions": [ { "int4": "", - "int8": 1.22, + "int8": 100.2, "fp16": "", "fp32": "", "bf16": "" @@ -14280,19 +13265,19 @@ } }, { - "Platform": 
"Intel® Xeon® Gold 6238L CPU-only", - "Model": "yolo11", + "Platform": "Intel® Core™ i7-12700H iGPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 383.86, + "int8": 744.28, + "fp16": 414.98, + "fp32": "", "bf16": "" } ], @@ -14303,7 +13288,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1.88, "fp16": "", "fp32": "", "bf16": "" @@ -14315,19 +13300,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6238L CPU-only", + "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 749.14, - "fp16": "", - "fp32": 338.04, + "int8": 217.24, + "fp16": 148.8, + "fp32": "", "bf16": "" } ], @@ -14338,7 +13323,7 @@ "Precisions": [ { "int4": "", - "int8": 4.21, + "int8": 5.62, "fp16": "", "fp32": "", "bf16": "" @@ -14350,19 +13335,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "bert-base-cased", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 622.71, - "fp16": "", - "fp32": 240.52, + "int8": 66.74, + "fp16": 52.5, + "fp32": "", "bf16": "" } ], @@ -14373,7 +13358,7 @@ "Precisions": [ { "int4": "", - "int8": 6.4, + "int8": 14.42, "fp16": "", "fp32": "", "bf16": "" @@ -14385,19 +13370,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "efficientdet-d0", "featured_SKU": false, 
"whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 721.9, - "fp16": "", - "fp32": 423.3, + "int8": 98.48, + "fp16": 73.51, + "fp32": "", "bf16": "" } ], @@ -14408,7 +13393,7 @@ "Precisions": [ { "int4": "", - "int8": 4.83, + "int8": 13.42, "fp16": "", "fp32": "", "bf16": "" @@ -14420,30 +13405,30 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "gemma-2-9b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 10.46, + "int4": 6.2, + "int8": 3.87, "fp16": "", - "fp32": 2.45, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 221.46, + "int4": 161.18, + "int8": 258.1, "fp16": "", "fp32": "", "bf16": "" @@ -14455,30 +13440,30 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "glm-4-9b-chat", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 16509.95, + "int4": 7.35, + "int8": 4.32, "fp16": "", - "fp32": 5201.56, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 0.59, + "int4": 135.94, + "int8": 231.26, "fp16": "", "fp32": "", "bf16": "" @@ -14490,30 +13475,30 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N 
CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "llama-2-7b-chat", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 3352.09, + "int4": 8.49, + "int8": 5.7, "fp16": "", - "fp32": 825.5, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1.34, + "int4": 117.65, + "int8": 175.21, "fp16": "", "fp32": "", "bf16": "" @@ -14525,30 +13510,30 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "llama-3-8b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 60.91, + "int4": 8.67, + "int8": 5.1, "fp16": "", - "fp32": 15.11, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 36.91, + "int4": 115.26, + "int8": 195.88, "fp16": "", "fp32": "", "bf16": "" @@ -14560,31 +13545,31 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "llama-3.2-3b-instruct", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 6975.09, - "fp16": "", - "fp32": 1755.62, + "int4": 16.31, + "int8": 11.96, + "fp16": 6.49, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + 
"Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 0.77, - "fp16": "", + "int4": 61.29, + "int8": 83.56, + "fp16": 153.99, "fp32": "", "bf16": "" } @@ -14595,19 +13580,19 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", - "Model": "yolo11", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.73, "fp16": "", - "fp32": 571.3, + "fp32": "", "bf16": "" } ], @@ -14618,7 +13603,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 1197.61, "fp16": "", "fp32": "", "bf16": "" @@ -14630,30 +13615,30 @@ } }, { - "Platform": "Intel® Xeon® Gold 6338N CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "mistral-7b-v0.1", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 1224.86, + "int4": 8.95, + "int8": 5.55, "fp16": "", - "fp32": 495.73, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 2.98, + "int4": 111.61, + "int8": 180.11, "fp16": "", "fp32": "", "bf16": "" @@ -14665,19 +13650,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "mobilenet-v2", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 
587.54, - "fp16": "", - "fp32": 225.64, + "int8": 874.69, + "fp16": 627.62, + "fp32": "", "bf16": "" } ], @@ -14688,7 +13673,7 @@ "Precisions": [ { "int4": "", - "int8": 9.18, + "int8": 1.37, "fp16": "", "fp32": "", "bf16": "" @@ -14700,31 +13685,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "phi-3-mini-4k-instruct", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 580.8, - "fp16": "", - "fp32": 343.39, + "int8": 9.98, + "fp16": 5.34, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 6.9, - "fp16": "", + "int8": 100.15, + "fp16": 187.11, "fp32": "", "bf16": "" } @@ -14735,30 +13720,30 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "qwen2-7b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 8.58, + "int4": 9.11, + "int8": 5.41, "fp16": "", - "fp32": 2.26, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 248.72, + "int4": 109.7, + "int8": 184.76, "fp16": "", "fp32": "", "bf16": "" @@ -14770,19 +13755,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "resnet-50", "featured_SKU": false, "whats_new_model": false, - 
"PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 14930.31, - "fp16": "", - "fp32": 4646.16, + "int8": 276.72, + "fp16": 163.88, + "fp32": "", "bf16": "" } ], @@ -14793,7 +13778,7 @@ "Precisions": [ { "int4": "", - "int8": 0.93, + "int8": 3.86, "fp16": "", "fp32": "", "bf16": "" @@ -14805,19 +13790,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "ssd-resnet34-1200", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 2965.31, - "fp16": "", - "fp32": 761.01, + "int8": 7.08, + "fp16": 3.98, + "fp32": "", "bf16": "" } ], @@ -14828,7 +13813,7 @@ "Precisions": [ { "int4": "", - "int8": 1.59, + "int8": 127.65, "fp16": "", "fp32": "", "bf16": "" @@ -14840,19 +13825,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 58.15, - "fp16": "", - "fp32": 15.0, + "int8": 484.68, + "fp16": 298.46, + "fp32": "", "bf16": "" } ], @@ -14863,7 +13848,7 @@ "Precisions": [ { "int4": "", - "int8": 37.18, + "int8": 2.46, "fp16": "", "fp32": "", "bf16": "" @@ -14875,19 +13860,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", + "Model": "yolo_v8n", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Intel® 
Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 6130.48, - "fp16": "", - "fp32": 1654.84, + "int8": 163.2, + "fp16": 106.34, + "fp32": "", "bf16": "" } ], @@ -14898,7 +13883,7 @@ "Precisions": [ { "int4": "", - "int8": 1.2, + "int8": 6.36, "fp16": "", "fp32": "", "bf16": "" @@ -14910,19 +13895,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "yolo11", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "efficientdet-d0", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 512.57, + "int8": 146.34, + "fp16": 116.04, + "fp32": "", "bf16": "" } ], @@ -14933,7 +13918,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 9.88, "fp16": "", "fp32": "", "bf16": "" @@ -14945,30 +13930,30 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8280 CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "llama-3-8b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 996.59, + "int4": 10.75, + "int8": "", "fp16": "", - "fp32": 452.05, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 3.6, + "int4": 93.02, + "int8": "", "fp16": "", "fp32": "", "bf16": "" @@ -14980,30 +13965,30 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "llama-3.2-3b-instruct", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, 
CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 881.04, + "int4": 22.54, + "int8": 16.7, "fp16": "", - "fp32": 338.12, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 5.18, + "int4": 44.35, + "int8": 59.85, "fp16": "", "fp32": "", "bf16": "" @@ -15015,19 +14000,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": false, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1009.71, + "int8": 5.93, "fp16": "", - "fp32": 562.38, + "fp32": "", "bf16": "" } ], @@ -15038,7 +14023,7 @@ "Precisions": [ { "int4": "", - "int8": 4.28, + "int8": 189.11, "fp16": "", "fp32": "", "bf16": "" @@ -15050,31 +14035,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "gemma-2-9b", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "mistral-7b-v0.1", "featured_SKU": false, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 20.78, - "int8": 14.18, - "fp16": 7.72, + "int4": 12.03, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 48.12, - "int8": 70.5, - "fp16": 129.51, + "int4": 83.06, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -15085,31 +14070,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "glm-4-9b-chat", + "Platform": "Intel® 
Data Center GPU Flex 140 dGPU", + "Model": "phi-3-mini-4k-instruct", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 22.79, - "int8": 15.56, - "fp16": 8.48, + "int4": 20.08, + "int8": 13.67, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 43.86, - "int8": 64.26, - "fp16": 117.92, + "int4": 49.78, + "int8": 73.11, + "fp16": "", "fp32": "", "bf16": "" } @@ -15120,31 +14105,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "llama-2-7b-chat", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "qwen2-7b", "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 25.41, - "int8": 18.68, - "fp16": 10.61, + "int4": 11.39, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 39.34, - "int8": 53.51, - "fp16": 94.17, + "int4": 87.76, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -15155,18 +14140,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "llama-3-8b", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "resnet-50", "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 26.07, - "int8": 17.66, - "fp16": 9.72, + "int4": "", + "int8": "", + "fp16": 504.64, "fp32": "", "bf16": "" } @@ -15177,9 +14162,9 @@ "latency": { "Precisions": [ { - 
"int4": 38.35, - "int8": 56.62, - "fp16": 102.88, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -15190,18 +14175,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "llama-3.2-3b-instruct", + "Platform": "Intel® Data Center GPU Flex 140 dGPU", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": false, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 46.81, - "int8": 33.54, - "fp16": 19.32, + "int4": "", + "int8": 983.44, + "fp16": 762.41, "fp32": "", "bf16": "" } @@ -15212,9 +14197,9 @@ "latency": { "Precisions": [ { - "int4": 21.36, - "int8": 29.81, - "fp16": 51.74, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -15225,19 +14210,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", - "featured_SKU": false, + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "bert-base-cased", + "featured_SKU": "false", "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 14.73, - "fp16": "", - "fp32": 3.42, + "int8": 385.63, + "fp16": 437.6, + "fp32": "", "bf16": "" } ], @@ -15248,7 +14233,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 3.0, "fp16": "", "fp32": "", "bf16": "" @@ -15260,18 +14245,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "mistral-7b-v0.1", - "featured_SKU": false, + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "efficientdet-d0", + "featured_SKU": "false", "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 26.89, - "int8": 18.54, - "fp16": 10.22, + "int4": "", + 
"int8": 425.95, + "fp16": 365.18, "fp32": "", "bf16": "" } @@ -15282,9 +14267,9 @@ "latency": { "Precisions": [ { - "int4": 37.18, - "int8": 53.93, - "fp16": 97.8, + "int4": "", + "int8": 2.79, + "fp16": "", "fp32": "", "bf16": "" } @@ -15295,30 +14280,30 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "mobilenet-v2", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "gemma-2-9b", + "featured_SKU": "false", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 22703.47, + "int4": 22.54, + "int8": 18.33, "fp16": "", - "fp32": 6937.71, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 0.58, + "int4": 44.35, + "int8": 54.53, "fp16": "", "fp32": "", "bf16": "" @@ -15330,31 +14315,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "phi-3-mini-4k-instruct", - "featured_SKU": false, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "glm-4-9b-chat", + "featured_SKU": "false", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 39.41, - "int8": 29.28, - "fp16": 17.35, + "int4": 39.46, + "int8": 26.75, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 25.37, - "int8": 34.15, - "fp16": 57.61, + "int4": 25.34, + "int8": 37.37, + "fp16": "", "fp32": "", "bf16": "" } @@ -15365,31 +14350,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "qwen2-7b", - "featured_SKU": false, - "whats_new_model": true, - "PlatformType": 
"Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "llama-2-7b-chat", + "featured_SKU": "false", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 28.26, - "int8": 19.32, - "fp16": 10.27, + "int4": 44.66, + "int8": 33.45, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 35.38, - "int8": 51.74, - "fp16": 97.35, + "int4": 22.39, + "int8": 29.89, + "fp16": "", "fp32": "", "bf16": "" } @@ -15400,30 +14385,30 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "resnet-50", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "llama-3-8b", + "featured_SKU": "false", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 4874.95, + "int4": 44.86, + "int8": 30.51, "fp16": "", - "fp32": 1144.73, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1.07, + "int4": 22.29, + "int8": 32.77, "fp16": "", "fp32": "", "bf16": "" @@ -15435,31 +14420,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "ssd-resnet34-1200", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "llama-3.2-3b-instruct", + "featured_SKU": "false", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 84.6, - "fp16": "", - "fp32": 20.95, + "int4": 71.27, + "int8": 57.11, + "fp16": 36.83, + "fp32": "", "bf16": "" } ], - "Unit": 
"FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": "", - "fp16": "", + "int4": 14.03, + "int8": 17.51, + "fp16": 27.15, "fp32": "", "bf16": "" } @@ -15470,19 +14455,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "ssd_mobilenet_v1_coco", - "featured_SKU": false, + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "mask_rcnn_resnet50_atrous_coco", + "featured_SKU": "false", "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 10174.18, - "fp16": "", - "fp32": 2524.59, + "int8": 32.44, + "fp16": 18.63, + "fp32": "", "bf16": "" } ], @@ -15493,7 +14478,7 @@ "Precisions": [ { "int4": "", - "int8": 0.7, + "int8": 47.53, "fp16": "", "fp32": "", "bf16": "" @@ -15505,31 +14490,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "stable-diffusion-v1-5", - "featured_SKU": false, + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "mistral-7b-v0.1", + "featured_SKU": "false", "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": "", + "int4": 44.42, + "int8": 32.11, "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 13.34, - "fp16": 13.66, + "int4": 22.51, + "int8": 31.14, + "fp16": "", "fp32": "", "bf16": "" } @@ -15540,19 +14525,19 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "yolo11", - "featured_SKU": false, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "mobilenet-v2", + "featured_SKU": "false", + "whats_new_model": false, 
+ "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 803.12, + "int8": 2978.92, + "fp16": 3132.03, + "fp32": "", "bf16": "" } ], @@ -15563,7 +14548,7 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 0.57, "fp16": "", "fp32": "", "bf16": "" @@ -15575,31 +14560,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8380 CPU-only", - "Model": "yolo_v8n", - "featured_SKU": false, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "phi-3-mini-4k-instruct", + "featured_SKU": "false", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 1704.08, - "fp16": "", - "fp32": 697.23, + "int4": 69.97, + "int8": 50.91, + "fp16": 32.48, + "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 2.36, - "fp16": "", + "int4": 14.29, + "int8": 19.64, + "fp16": 30.78, "fp32": "", "bf16": "" } @@ -15610,33 +14595,33 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "bert-base-cased", - "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "qwen2-7b", + "featured_SKU": "false", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 3023.92, + "int4": 45.02, + "int8": 32.48, "fp16": "", - "fp32": 483.11, - "bf16": 1976.63 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 3.79, + "int4": 22.21, + "int8": 30.78, "fp16": "", "fp32": "", - "bf16": 4.84 + "bf16": "" } ], "Unit": "ms", @@ -15645,20 
+14630,20 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "efficientdet-d0", - "featured_SKU": true, + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "resnet-50", + "featured_SKU": "false", "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 1445.78, - "fp16": "", - "fp32": 861.51, - "bf16": 1021.75 + "int8": 1971.27, + "fp16": 1355.77, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -15668,10 +14653,10 @@ "Precisions": [ { "int4": "", - "int8": 4.69, + "int8": 0.78, "fp16": "", "fp32": "", - "bf16": 5.16 + "bf16": "" } ], "Unit": "ms", @@ -15680,18 +14665,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "gemma-2-9b", - "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "ssd-resnet34-1200", + "featured_SKU": "false", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 22.71, - "int8": 16.83, - "fp16": 10.76, + "int4": "", + "int8": 132.85, + "fp16": 80.37, "fp32": "", "bf16": "" } @@ -15702,9 +14687,9 @@ "latency": { "Precisions": [ { - "int4": 44.03, - "int8": 59.39, - "fp16": 92.87, + "int4": "", + "int8": 13.81, + "fp16": "", "fp32": "", "bf16": "" } @@ -15715,18 +14700,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "glm-4-9b-chat", - "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "ssd_mobilenet_v1_coco", + "featured_SKU": "false", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 23.7, - "int8": 16.93, - "fp16": 11.27, + "int4": "", + "int8": 2274.66, + 
"fp16": 1667.73, "fp32": "", "bf16": "" } @@ -15737,9 +14722,9 @@ "latency": { "Precisions": [ { - "int4": 42.19, - "int8": 59.04, - "fp16": 88.67, + "int4": "", + "int8": 0.78, + "fp16": "", "fp32": "", "bf16": "" } @@ -15750,18 +14735,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "llama-2-7b-chat", - "featured_SKU": true, + "Platform": "Intel® Data Center GPU Flex 170 dGPU", + "Model": "yolo_v8n", + "featured_SKU": "false", "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 26.11, - "int8": 20.1, - "fp16": 14.19, + "int4": "", + "int8": 756.78, + "fp16": 691.63, "fp32": "", "bf16": "" } @@ -15772,9 +14757,9 @@ "latency": { "Precisions": [ { - "int4": 38.29, - "int8": 49.73, - "fp16": 70.45, + "int4": "", + "int8": 1.97, + "fp16": "", "fp32": "", "bf16": "" } @@ -15785,20 +14770,20 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "llama-3-8b", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "bert-base-cased", "featured_SKU": true, - "whats_new_model": "false", + "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 26.02, - "int8": 18.97, - "fp16": 13.23, - "fp32": "", - "bf16": "" + "int4": "", + "int8": 8626.80, + "fp16": "", + "fp32": 1267.91, + "bf16": 6050.76 } ], "Unit": "FPS", @@ -15807,9 +14792,9 @@ "latency": { "Precisions": [ { - "int4": 38.42, - "int8": 52.71, - "fp16": 75.57, + "int4": "", + "int8": 7.68, + "fp16": "", "fp32": "", "bf16": "" } @@ -15820,20 +14805,20 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "llama-3.2-3b-instruct", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "efficientdet-d0", "featured_SKU": true, - "whats_new_model": true, + "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { 
"Precisions": [ { - "int4": 45.68, - "int8": 36.96, - "fp16": 27.27, - "fp32": "", - "bf16": "" + "int4": "", + "int8": 3411.17, + "fp16": "", + "fp32": 2334.30, + "bf16": 2889.81 } ], "Unit": "FPS", @@ -15842,9 +14827,9 @@ "latency": { "Precisions": [ { - "int4": 21.89, - "int8": 27.05, - "fp16": 36.67, + "int4": "", + "int8": 9.21, + "fp16": "", "fp32": "", "bf16": "" } @@ -15855,33 +14840,33 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "gemma-2-9b", "featured_SKU": true, - "whats_new_model": false, + "whats_new_model": true, "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 62.13, + "int4": 119.07, + "int8": "", "fp16": "", - "fp32": 5.19, - "bf16": 37.54 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 58.49, + "int8": "", "fp16": "", "fp32": "", - "bf16": 81.95 + "bf16": "" } ], "Unit": "ms", @@ -15890,31 +14875,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "mistral-7b-v0.1", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "glm-4-9b-chat", "featured_SKU": true, - "whats_new_model": false, + "whats_new_model": "false", "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 27.42, - "int8": 19.9, - "fp16": 13.72, + "int4": 140.9, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 36.46, - "int8": 50.24, - "fp16": 72.84, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -15925,8 +14910,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": 
"llama-2-7b-chat", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -15934,24 +14919,24 @@ "throughput": { "Precisions": [ { - "int4": "", - "int8": 38538.65, + "int4": 149.6, + "int8": "", "fp16": "", - "fp32": 10274.08, - "bf16": 25608.67 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 0.65, + "int8": "", "fp16": "", "fp32": "", - "bf16": 0.66 + "bf16": "" } ], "Unit": "ms", @@ -15960,31 +14945,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "phi-3-mini-4k-instruct", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "llama-3-8b", "featured_SKU": true, - "whats_new_model": true, + "whats_new_model": "false", "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 33.53, - "fp16": 23.1, + "int4": 153.0, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 24.06, - "int8": 29.82, - "fp16": 43.29, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -15995,8 +14980,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "qwen2-7b", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "llama-3.2-3b-instruct", "featured_SKU": true, "whats_new_model": true, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16004,22 +14989,22 @@ "throughput": { "Precisions": [ { - "int4": 30.03, - "int8": 22.14, - "fp16": 13.95, + "int4": "277.1", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 33.3, - "int8": 45.16, - "fp16": 71.68, + "int4": "", + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -16030,8 +15015,8 @@ } }, { - "Platform": "Intel® 
Xeon® Platinum 8480+ CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16040,10 +15025,10 @@ "Precisions": [ { "int4": "", - "int8": 19226.96, + "int8": 144.22, "fp16": "", - "fp32": 1597.37, - "bf16": 7480.12 + "fp32": 12.07, + "bf16": 90.67 } ], "Unit": "FPS", @@ -16053,10 +15038,10 @@ "Precisions": [ { "int4": "", - "int8": 1.01, + "int8": 76.77, "fp16": "", "fp32": "", - "bf16": 1.25 + "bf16": "" } ], "Unit": "ms", @@ -16065,8 +15050,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "mistral-7b-v0.1", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16074,14 +15059,14 @@ "throughput": { "Precisions": [ { - "int4": "", - "int8": 434.12, + "int4": 154.9, + "int8": "", "fp16": "", - "fp32": 30.6, - "bf16": 209.11 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16100,8 +15085,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "mobilenet-v2", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16110,10 +15095,10 @@ "Precisions": [ { "int4": "", - "int8": 24134.02, + "int8": 37588.55, "fp16": "", - "fp32": 3392.4, - "bf16": 12168.49 + "fp32": 19668.20, + "bf16": 22418.62 } ], "Unit": "FPS", @@ -16123,10 +15108,10 @@ "Precisions": [ { "int4": "", - "int8": 0.74, + "int8": 1.81, "fp16": "", "fp32": "", - "bf16": 0.89 + "bf16": "" } ], "Unit": "ms", @@ -16135,31 +15120,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "stable-diffusion-v1-5", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": 
"phi-3-mini-4k-instruct", "featured_SKU": true, - "whats_new_model": false, + "whats_new_model": true, "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", + "int4": 225.5, "int8": "", "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 4.62, - "fp16": 4.55, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -16170,23 +15155,23 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "yolo11", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "qwen2-7b", "featured_SKU": true, - "whats_new_model": "false", + "whats_new_model": true, "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", + "int4": 165.5, "int8": "", "fp16": "", - "fp32": 1034.68, - "bf16": 2068.81 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16205,8 +15190,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "resnet-50", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16215,10 +15200,10 @@ "Precisions": [ { "int4": "", - "int8": 2380.51, + "int8": 29152.17, "fp16": "", - "fp32": 950.6, - "bf16": 2374.89 + "fp32": 4342.04, + "bf16": 21711.45 } ], "Unit": "FPS", @@ -16228,10 +15213,10 @@ "Precisions": [ { "int4": "", - "int8": 3.13, + "int8": 2.17, "fp16": "", "fp32": "", - "bf16": 2.54 + "bf16": "" } ], "Unit": "ms", @@ -16240,8 +15225,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "ssd-resnet34-1200", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16250,10 +15235,10 @@ "Precisions": [ { 
"int4": "", - "int8": 4671.04, + "int8": 997.71, "fp16": "", - "fp32": 560.3, - "bf16": 3211.93 + "fp32": 78.49, + "bf16": 517.96 } ], "Unit": "FPS", @@ -16263,10 +15248,10 @@ "Precisions": [ { "int4": "", - "int8": 3.66, + "int8": 10.12, "fp16": "", "fp32": "", - "bf16": 4.77 + "bf16": "" } ], "Unit": "ms", @@ -16275,8 +15260,8 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": true, "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", @@ -16285,10 +15270,10 @@ "Precisions": [ { "int4": "", - "int8": 1725.13, + "int8": 33085.55, "fp16": "", - "fp32": 1123.04, - "bf16": 1407.69 + "fp32": 9524.47, + "bf16": 16162.95 } ], "Unit": "FPS", @@ -16298,10 +15283,10 @@ "Precisions": [ { "int4": "", - "int8": 4.71, + "int8": 1.82, "fp16": "", "fp32": "", - "bf16": 4.84 + "bf16": "" } ], "Unit": "ms", @@ -16310,20 +15295,20 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "gemma-2-9b", + "Platform": "Intel® Xeon® Platinum 6979P", + "Model": "yolo_v8n", "featured_SKU": true, - "whats_new_model": true, + "whats_new_model": false, "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ { - "int4": 25.46, - "int8": 18.96, - "fp16": 12.14, - "fp32": "", - "bf16": "" + "int4": "", + "int8": 5975.29, + "fp16": "", + "fp32": 2698.86, + "bf16": 6021.30 } ], "Unit": "FPS", @@ -16332,9 +15317,9 @@ "latency": { "Precisions": [ { - "int4": 39.27, - "int8": 52.74, - "fp16": 82.36, + "int4": "", + "int8": 5.93, + "fp16": "", "fp32": "", "bf16": "" } @@ -16345,18 +15330,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "glm-4-9b-chat", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "bert-base-cased", "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": 
"Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 27.1, - "int8": 19.33, - "fp16": 12.69, + "int4": "", + "int8": 848.32, + "fp16": 743.75, "fp32": "", "bf16": "" } @@ -16367,9 +15352,9 @@ "latency": { "Precisions": [ { - "int4": 36.9, - "int8": 51.72, - "fp16": 78.77, + "int4": "", + "int8": 1.37, + "fp16": "", "fp32": "", "bf16": "" } @@ -16380,18 +15365,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "llama-2-7b-chat", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "efficientdet-d0", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 31.71, - "int8": 23.05, - "fp16": 16.64, + "int4": "", + "int8": 300.16, + "fp16": 313.15, "fp32": "", "bf16": "" } @@ -16402,9 +15387,9 @@ "latency": { "Precisions": [ { - "int4": 31.53, - "int8": 43.37, - "fp16": 60.07, + "int4": "", + "int8": 5.89, + "fp16": "", "fp32": "", "bf16": "" } @@ -16415,31 +15400,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "llama-3-8b", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "gemma-2-9b", "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 30.06, - "int8": 21.73, - "fp16": 14.93, + "int4": 45.3, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 33.26, - "int8": 46.01, - "fp16": 66.97, + "int4": 22.05, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -16450,31 +15435,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "llama-3.2-3b-instruct", + "Platform": "Intel® Arc™ B-Series Graphics B580", + 
"Model": "glm-4-9b-chat", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 54.73, - "int8": 42.58, - "fp16": 31.51, + "int4": 64.5, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 18.27, - "int8": 23.48, - "fp16": 31.73, + "int4": 15.5, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -16485,33 +15470,33 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "llama-2-7b-chat", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 74.86, + "int4": 69.7, + "int8": "", "fp16": "", - "fp32": 6.39, - "bf16": 48.32 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 55.7, + "int4": 14.3, + "int8": "", "fp16": "", "fp32": "", - "bf16": 73.74 + "bf16": "" } ], "Unit": "ms", @@ -16520,31 +15505,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "mistral-7b-v0.1", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "llama-3-8b", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 33.27, - "int8": 22.24, - "fp16": 15.74, + "int4": 72.1, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { 
"Precisions": [ { - "int4": 30.05, - "int8": 44.96, - "fp16": 63.51, + "int4": 13.9, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -16555,33 +15540,33 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "llama-3.2-3b-instruct", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 39894.55, + "int4": 121.4, + "int8": "", "fp16": "", - "fp32": 15839.75, - "bf16": 29419.55 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 0.84, + "int4": 8.2, + "int8": "", "fp16": "", "fp32": "", - "bf16": 0.72 + "bf16": "" } ], "Unit": "ms", @@ -16590,18 +15575,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "phi-3-mini-4k-instruct", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 40.45, - "fp16": 26.95, + "int8": 31.77, + "fp16": 17.24, "fp32": "", "bf16": "" } @@ -16613,8 +15598,8 @@ "Precisions": [ { "int4": "", - "int8": 24.72, - "fp16": 37.1, + "int8": 43.68, + "fp16": "", "fp32": "", "bf16": "" } @@ -16625,31 +15610,31 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "qwen2-7b", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "mistral-7b-v0.1", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { 
"throughput": { "Precisions": [ { - "int4": 35.48, - "int8": 25.7, - "fp16": 16.1, + "int4": 70.3, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 28.18, - "int8": 38.91, - "fp16": 62.09, + "int4": 14.2, + "int8": "", + "fp16": "", "fp32": "", "bf16": "" } @@ -16660,20 +15645,20 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "mobilenet-v2", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 21612.82, - "fp16": "", - "fp32": 2002.36, - "bf16": 13669.05 + "int8": 3947.72, + "fp16": 4159.12, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -16683,10 +15668,10 @@ "Precisions": [ { "int4": "", - "int8": 1.0, + "int8": 0.39, "fp16": "", "fp32": "", - "bf16": 1.37 + "bf16": "" } ], "Unit": "ms", @@ -16695,29 +15680,29 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "phi-3-mini-4k-instruct", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 513.09, + "int4": 108.5, + "int8": "", "fp16": "", - "fp32": 35.2, - "bf16": 275.94 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", + "int4": 9.2, "int8": "", "fp16": "", "fp32": "", @@ -16730,33 +15715,33 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Arc™ B-Series Graphics B580", + 
"Model": "qwen2-7b", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 26748.89, + "int4": 78.4, + "int8": "", "fp16": "", - "fp32": 4718.18, - "bf16": 16684.87 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 0.72, + "int4": 12.8, + "int8": "", "fp16": "", "fp32": "", - "bf16": 1.15 + "bf16": "" } ], "Unit": "ms", @@ -16765,18 +15750,18 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "stable-diffusion-v1-5", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "resnet-50", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", + "int8": 3375.63, + "fp16": 1964.87, "fp32": "", "bf16": "" } @@ -16788,8 +15773,8 @@ "Precisions": [ { "int4": "", - "int8": 4.09, - "fp16": 3.99, + "int8": 0.42, + "fp16": "", "fp32": "", "bf16": "" } @@ -16800,20 +15785,20 @@ } }, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "yolo11", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "ssd-resnet34-1200", "featured_SKU": true, - "whats_new_model": "false", - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", - "fp32": 1455.5, - "bf16": 2962.49 + "int8": 50.49, + "fp16": 59.89, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -16823,10 +15808,10 @@ "Precisions": [ { "int4": "", - "int8": "", + "int8": 20.41, "fp16": "", "fp32": "", - "bf16": 3.19 + "bf16": "" } ], "Unit": "ms", @@ -16835,20 +15820,20 @@ } 
}, { - "Platform": "Intel® Xeon® Platinum 8580 CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3043.23, - "fp16": "", - "fp32": 1258.2, - "bf16": 3444.22 + "int8": 2304.40, + "fp16": 2174.73, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -16858,10 +15843,10 @@ "Precisions": [ { "int4": "", - "int8": 3.08, + "int8": 0.61, "fp16": "", "fp32": "", - "bf16": 2.56 + "bf16": "" } ], "Unit": "ms", @@ -16870,20 +15855,20 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "bert-base-cased", + "Platform": "Intel® Arc™ B-Series Graphics B580", + "Model": "yolo_v8n", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 8897.30, - "fp16": "", - "fp32": 1217.03, - "bf16": 6414.49 + "int8": 1127.03, + "fp16": 1076.39, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -16893,9 +15878,9 @@ "Precisions": [ { "int4": "", - "int8": 7.74, + "int8": 1.25, "fp16": "", - "fp32": 14.8, + "fp32": "", "bf16": "" } ], @@ -16905,20 +15890,20 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "efficientdet-d0", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "bert-base-cased", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 3384.23, - "fp16": "", - "fp32": 2295.4, - "bf16": 2872.84 + "int8": 788.23, + "fp16": 689.29, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -16928,9 +15913,9 @@ "Precisions": [ { "int4": "", - "int8": 9.71, + "int8": 1.43, "fp16": "", - 
"fp32": 9.43, + "fp32": "", "bf16": "" } ], @@ -16940,20 +15925,20 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "mask_rcnn_resnet50_atrous_coco", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "efficientdet-d0", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": 149.52, - "fp16": "", - "fp32": 11.97, - "bf16": 91.85 + "int8": 299.62, + "fp16": 298.77, + "fp32": "", + "bf16": "" } ], "Unit": "FPS", @@ -16963,9 +15948,9 @@ "Precisions": [ { "int4": "", - "int8": 74.6, + "int8": 5.98, "fp16": "", - "fp32": 248.21, + "fp32": "", "bf16": "" } ], @@ -16975,32 +15960,32 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "mobilenet-v2", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "gemma-2-9b", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 32737.09, + "int4": 38.6, + "int8": "", "fp16": "", - "fp32": 25621.92, - "bf16": 26297.21 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 1.65, + "int4": 25.9, + "int8": "", "fp16": "", - "fp32": 1.34, + "fp32": "", "bf16": "" } ], @@ -17010,32 +15995,32 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "resnet-50", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "glm-4-9b-chat", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 27670.82, + "int4": 55.1, + "int8": "", "fp16": "", - "fp32": 
4254.94, - "bf16": 22432.74 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 2.28, + "int4": 18.1, + "int8": "", "fp16": "", - "fp32": 3.69, + "fp32": "", "bf16": "" } ], @@ -17045,32 +16030,32 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "ssd-resnet34-1200", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "llama-2-7b-chat", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 1009.62, + "int4": 60.9, + "int8": "", "fp16": "", - "fp32": 77.99, - "bf16": 532.90 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 9.73, + "int4": 16.4, + "int8": "", "fp16": "", - "fp32": 34.1, + "fp32": "", "bf16": "" } ], @@ -17080,32 +16065,32 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "ssd_mobilenet_v1_coco", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "llama-3-8b", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": "false", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 29674.40, + "int4": 63.4, + "int8": "", "fp16": "", - "fp32": 9800.83, - "bf16": 19479.18 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 2.08, + "int4": 15.8, + "int8": "", "fp16": "", - "fp32": 2.45, + "fp32": "", "bf16": "" } ], @@ -17115,32 +16100,32 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "yolo_v8n", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": 
"llama-3.2-3b-instruct", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": "", - "int8": 5590.87, + "int4": 110.3, + "int8": "", "fp16": "", - "fp32": 2699.0, - "bf16": 6003.66 + "fp32": "", + "bf16": "" } ], - "Unit": "FPS", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": "", - "int8": 6.56, + "int4": 9.1, + "int8": "", "fp16": "", - "fp32": 5.59, + "fp32": "", "bf16": "" } ], @@ -17150,31 +16135,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "gemma-2-9b", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "mask_rcnn_resnet50_atrous_coco", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 136.4, - "int8": "", - "fp16": 53.6, + "int4": "", + "int8": 26.85, + "fp16": 15.06, "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "FPS", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 7.3, - "int8": "", - "fp16": 18.7, + "int4": "", + "int8": 49.87, + "fp16": "", "fp32": "", "bf16": "" } @@ -17185,31 +16170,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "glm-4-9b-chat", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "mistral-7b-v0.1", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 116.5, + "int4": 62.7, "int8": "", - "fp16": 51.9, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 
8.6, + "int4": 16.0, "int8": "", - "fp16": 19, + "fp16": "", "fp32": "", "bf16": "" } @@ -17220,31 +16205,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "llama-2-7b-chat", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "mobilenet-v2", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 139.5, - "int8": "", - "fp16": 132, + "int4": "", + "int8": 4116.03, + "fp16": 3936.74, "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "FPS", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 7.2, - "int8": "", - "fp16": 7.6, + "int4": "", + "int8": 0.40, + "fp16": "", "fp32": "", "bf16": "" } @@ -17255,32 +16240,32 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "llama-3.2-3b-instruct", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "phi-3-mini-4k-instruct", "featured_SKU": true, "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 272.7, - "int8": 65, + "int4": 96.3, + "int8": "", "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 3.7, + "int4": 10.4, "int8": "", "fp16": "", - "fp32": 15.4, + "fp32": "", "bf16": "" } ], @@ -17290,31 +16275,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "llama-3-8b", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "qwen2-7b", "featured_SKU": true, - "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": true, + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 148.2, + "int4": 68.2, "int8": "", - "fp16": 57.2, + "fp16": "", "fp32": "", "bf16": "" 
} ], - "Unit": "Tokens/sec", + "Unit": "Tokens per Sec", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 6.7, + "int4": 14.7, "int8": "", - "fp16": 17.5, + "fp16": "", "fp32": "", "bf16": "" } @@ -17325,31 +16310,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "mistral-7b-v0.1", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "resnet-50", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 126.4, - "int8": "", - "fp16": 61.4, + "int4": "", + "int8": 2830.65, + "fp16": 1851.03, "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "FPS", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 7.9, - "int8": "", - "fp16": 16.3, + "int4": "", + "int8": 0.45, + "fp16": "", "fp32": "", "bf16": "" } @@ -17360,31 +16345,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "phi-3-mini-4k-instruct", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "ssd-resnet34-1200", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 176.6, - "int8": "", - "fp16": 111.9, + "int4": "", + "int8": 57.74, + "fp16": 47.88, "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "FPS", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 5.7, - "int8": "", - "fp16": 8.9, + "int4": "", + "int8": 21.12, + "fp16": "", "fp32": "", "bf16": "" } @@ -17395,31 +16380,31 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "qwen2-7b", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "ssd_mobilenet_v1_coco", "featured_SKU": true, - "whats_new_model": true, - "PlatformType": "Intel® Xeon®, CPU-only", + "whats_new_model": false, + "PlatformType": 
"Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { - "int4": 164.4, - "int8": "", - "fp16": 62.2, + "int4": "", + "int8": 2202.46, + "fp16": 2063.27, "fp32": "", "bf16": "" } ], - "Unit": "Tokens/sec", + "Unit": "FPS", "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { - "int4": 6.1, - "int8": "", - "fp16": 16.1, + "int4": "", + "int8": 0.63, + "fp16": "", "fp32": "", "bf16": "" } @@ -17430,36 +16415,36 @@ } }, { - "Platform": "Intel® Xeon® 6979P CPU-only", - "Model": "stable-diffusion-v1-5", + "Platform": "Intel® Arc™ B-Series Graphics B570", + "Model": "yolo_v8n", "featured_SKU": true, "whats_new_model": false, - "PlatformType": "Intel® Xeon®, CPU-only", + "PlatformType": "Accelerator Platforms", "Parameters": { "throughput": { "Precisions": [ { "int4": "", - "int8": "", - "fp16": "", + "int8": 946.02, + "fp16": 943.72, "fp32": "", "bf16": "" } ], - "Unit": "n/a", - "UnitDesc": "n/a" + "Unit": "FPS", + "UnitDesc": "higher is better" }, "latency": { "Precisions": [ { "int4": "", - "int8": 4.0, - "fp16": 4.1, + "int8": 1.47, + "fp16": "", "fp32": "", "bf16": "" } ], - "Unit": "Image gen. 
time in sec.", + "Unit": "ms", "UnitDesc": "lower is better" } } From 87370fe7723831316a216189cc808f409a98ad52 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Wed, 18 Dec 2024 11:01:51 +0100 Subject: [PATCH 07/60] [GHA] Enabled snippets tests in smart CI (#28096) ### Details: Addressed CI issue from https://github.com/openvinotoolkit/openvino/pull/28035 ### Tickets: - *[159467](https://jira.devtools.intel.com/browse/CVS-159467)* --- .github/components.yml | 7 +++++++ .github/labeler.yml | 4 +++- .github/workflows/job_python_api_tests.yml | 11 +++++++++-- .github/workflows/linux_arm64.yml | 2 +- .github/workflows/mac.yml | 2 +- .github/workflows/mac_arm64.yml | 2 +- .github/workflows/manylinux_2014.yml | 2 +- .github/workflows/ubuntu_22.yml | 2 +- .github/workflows/ubuntu_24.yml | 2 +- 9 files changed, 25 insertions(+), 9 deletions(-) diff --git a/.github/components.yml b/.github/components.yml index 74247e1f051cd5..31952e2b87c114 100644 --- a/.github/components.yml +++ b/.github/components.yml @@ -175,6 +175,7 @@ Python_API: - OVC - tools - TF_FE + - docs_snippets build: - CPU - HETERO @@ -243,6 +244,12 @@ tools: docs: revalidate: [] build: [] + +docs_snippets: + revalidate: + - docs_snippets + build: + - Python_API licensing: revalidate: [] diff --git a/.github/labeler.yml b/.github/labeler.yml index e9b2acb26c9072..cb05d3dea36960 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -57,10 +57,12 @@ 'category: docs': - '**/*.md' - any: ['docs/**/*', - '!docs/snippets/**/*'] + '!docs/snippets/**/*', + '!docs/articles_en/assets/snippets/**/*'] 'category: docs_snippets': - 'docs/snippets/**/*' +- 'docs/articles_en/assets/snippets/**/*' 'category: extensions': - 'src/core/include/openvino/core/extension.hpp' diff --git a/.github/workflows/job_python_api_tests.yml b/.github/workflows/job_python_api_tests.yml index 81092db2bb808c..e12001cd67afba 100644 --- a/.github/workflows/job_python_api_tests.yml +++ b/.github/workflows/job_python_api_tests.yml @@ -12,6 
+12,10 @@ on: type: string required: false default: '{"image": null}' + affected-components: + description: 'Components that are affected by changes in the commit defined by the Smart CI Action' + type: string + required: true python-version: description: 'Python version to setup. E.g., "3.11"' type: string @@ -25,6 +29,7 @@ env: jobs: Python_Unit_Tests: name: Python API tests + if: ${{ fromJSON(inputs.affected-components).Python_API.test || fromJSON(inputs.affected-components).docs_snippets.test }} timeout-minutes: 30 runs-on: ${{ inputs.runner }} container: ${{ fromJSON(inputs.container) }} @@ -95,6 +100,7 @@ jobs: # - name: Python API Tests + if: fromJSON(inputs.affected-components).Python_API.test run: | # for 'template' extension export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}/tests/:$LD_LIBRARY_PATH @@ -103,6 +109,7 @@ jobs: --ignore=${INSTALL_TEST_DIR}/tests/pyopenvino/tests/test_utils/test_utils.py - name: Python API Tests -- numpy<2.0.0 + if: fromJSON(inputs.affected-components).Python_API.test run: | python3 -m pip uninstall -y numpy python3 -m pip install "numpy~=1.26.0" @@ -114,7 +121,7 @@ jobs: --ignore=${INSTALL_TEST_DIR}/tests/pyopenvino/tests/test_utils/test_utils.py - name: Clone API snippets - if: runner.os != 'macOS' + if: ${{ runner.os != 'macOS' && fromJSON(inputs.affected-components).docs_snippets.test }} uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 timeout-minutes: 15 with: @@ -123,7 +130,7 @@ jobs: submodules: 'false' - name: Docs Python snippets - if: runner.os != 'macOS' + if: ${{ runner.os != 'macOS' && fromJSON(inputs.affected-components).docs_snippets.test }} run: | # torch, onnx python3 -m pip install -r ${INSTALL_TEST_DIR}/tests/python/preprocess/torchvision/requirements.txt -r ${INSTALL_TEST_DIR}/tests/requirements_onnx diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index 9ca6c5461a62ea..66e825e5d5e126 100644 --- a/.github/workflows/linux_arm64.yml +++ 
b/.github/workflows/linux_arm64.yml @@ -179,8 +179,8 @@ jobs: with: runner: 'aks-linux-16-cores-arm' container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_20_04_arm64 }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' - if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test TensorFlow_Layer_Tests: name: TensorFlow Layer Tests diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 94460a2721b60f..5e4335b8151c02 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -326,8 +326,8 @@ jobs: python-version: [ '3.9', '3.10', '3.11', '3.12' ] with: runner: 'macos-13' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: ${{ matrix.python-version }} - if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test Python_Unit_Tests: name: Python unit tests diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 3340ce62e0104f..855d76973cc2e4 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -334,8 +334,8 @@ jobs: python-version: [ '3.9', '3.10', '3.11', '3.12' ] with: runner: 'macos-13-xlarge' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: ${{ matrix.python-version }} - if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test TensorFlow_Layer_Tests: name: TensorFlow Layer Tests diff --git a/.github/workflows/manylinux_2014.yml b/.github/workflows/manylinux_2014.yml index 4b5fc137c1504e..d6b3daa12abb57 100644 --- a/.github/workflows/manylinux_2014.yml +++ b/.github/workflows/manylinux_2014.yml @@ -266,8 +266,8 @@ jobs: with: runner: 'aks-linux-4-cores-16gb' container: '{"image": "${{ matrix.image }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: ${{ matrix.python-version }} - 
if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test Overall_Status: name: ci/gha_overall_status_manylinux2014 diff --git a/.github/workflows/ubuntu_22.yml b/.github/workflows/ubuntu_22.yml index d749164abbefd0..5aed74bbb242b8 100644 --- a/.github/workflows/ubuntu_22.yml +++ b/.github/workflows/ubuntu_22.yml @@ -311,8 +311,8 @@ jobs: with: runner: 'aks-linux-4-cores-16gb' container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_22_04_x64 }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' - if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test TensorFlow_Layer_Tests: name: TensorFlow Layer Tests diff --git a/.github/workflows/ubuntu_24.yml b/.github/workflows/ubuntu_24.yml index 2c76149ecdcb94..25be095e692d35 100644 --- a/.github/workflows/ubuntu_24.yml +++ b/.github/workflows/ubuntu_24.yml @@ -143,8 +143,8 @@ jobs: with: runner: 'aks-linux-4-cores-16gb' container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_24_04_x64 }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.12' - if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test Pytorch_Layer_Tests: name: Pytorch Layer Tests From 9ff59428f8badfe27f26bef32f3b0b4f986e3608 Mon Sep 17 00:00:00 2001 From: Edward Shogulin Date: Wed, 18 Dec 2024 10:03:36 +0000 Subject: [PATCH 08/60] [CPU] [ARM] [INT8] FullyConnected (#25171) ### Details: - *[ARM] [INT8] FullyConnected* ### Tickets: - *CVS-149494* --------- Co-authored-by: Aleksandr Voron --- .../src/network_helper.cpp | 2 +- .../convert_fc_to_quantized_legacy.cpp | 5 +- .../intel_cpu/src/dnnl_postops_composer.cpp | 47 +-- .../executors/acl/acl_common_executor.cpp | 24 +- .../executors/acl/acl_common_executor.hpp | 5 +- .../executors/acl/acl_fullyconnected.cpp | 318 +-------------- 
.../executors/acl/acl_fullyconnected.hpp | 38 +- .../acl/acl_fullyconnected_utils.cpp | 367 ++++++++++++++++++ .../acl/acl_fullyconnected_utils.hpp | 81 ++++ .../executors/acl/acl_lowp_fullyconnected.cpp | 151 +++++++ .../executors/acl/acl_lowp_fullyconnected.hpp | 51 +++ .../nodes/executors/common/common_utils.hpp | 66 ++++ .../src/nodes/executors/debug_messages.hpp | 29 +- .../fullyconnected_implementations.cpp | 39 ++ .../intel_cpu/tests/functional/CMakeLists.txt | 4 +- .../fully_connected_transformation.cpp | 113 ++++++ .../{ => x64}/add_transformation.cpp | 0 .../assign_and_read_value_transformation.cpp | 0 .../batch_to_space_transformation.cpp | 0 .../{ => x64}/clamp_transformation.cpp | 0 .../{ => x64}/concat_transformation.cpp | 0 .../concat_with_child_and_output.cpp | 0 ...t_with_different_precision_on_children.cpp | 0 ...oncat_with_intermediate_transformation.cpp | 0 .../concat_with_neighbors_transformation.cpp | 0 .../concat_with_split_transformation.cpp | 0 ...nvolution_backprop_data_transformation.cpp | 0 .../convolution_qdq_transformation.cpp | 0 .../{ => x64}/convolution_transformation.cpp | 0 .../depth_to_space_transformation.cpp | 0 ...ntwise_branch_selection_transformation.cpp | 0 ...eliminate_fake_quantize_transformation.cpp | 0 .../fq_and_avg_pool_transformation.cpp | 0 .../fq_and_max_pool_transformation.cpp | 0 ...d_two_output_branches_with_convolution.cpp | 0 .../fq_precision_selection_transformation.cpp | 0 .../{ => x64}/fq_transformation.cpp | 0 .../fq_with_dq_not_optimal_transformation.cpp | 0 .../fully_connected_transformation.cpp | 38 +- .../{ => x64}/fuse_convert_transformation.cpp | 0 .../fuse_dequantize_to_fq_transformation.cpp | 0 ...fuse_fq_and_scale_shift_transformation.cpp | 0 .../fuse_multiply_to_fq_transformation.cpp | 0 .../fuse_subtract_to_fq_transformation.cpp | 0 .../{ => x64}/gather_transformation.cpp | 0 .../{ => x64}/gemm_transformation.cpp | 0 .../group_convolution_transformation.cpp | 0 
.../groupconvolution_qdq_transformation.cpp | 0 .../{ => x64}/interpolate_transformation.cpp | 0 .../{ => x64}/mat_mul_transformation.cpp | 0 .../mat_mul_with_constant_transformation.cpp | 0 .../mat_mul_with_optimized_constant_fq.cpp | 0 .../move_fake_quantize_transformation.cpp | 0 .../multiply_to_group_convolution.cpp | 0 .../{ => x64}/multiply_transformation.cpp | 0 .../{ => x64}/multiply_with_one_parent.cpp | 0 .../{ => x64}/mvn_transformation.cpp | 0 .../{ => x64}/normalize_transformation.cpp | 0 .../{ => x64}/output_layers.cpp | 0 .../{ => x64}/output_layers_concat.cpp | 0 .../output_layers_concat_multi_channel.cpp | 0 .../{ => x64}/pad_transformation.cpp | 0 .../{ => x64}/prelu_transformation.cpp | 0 .../pull_reshape_through_dequantization.cpp | 0 .../recurrent_cell_transformation.cpp | 0 .../{ => x64}/reduce_max_transformation.cpp | 0 .../{ => x64}/reduce_mean_transformation.cpp | 0 .../{ => x64}/reduce_min_transformation.cpp | 0 .../{ => x64}/reduce_sum_transformation.cpp | 0 .../{ => x64}/relu_transformation.cpp | 0 .../{ => x64}/reshape_transformation.cpp | 0 .../shuffle_channels_transformation.cpp | 0 .../space_to_batch_transformation.cpp | 0 .../{ => x64}/split_transformation.cpp | 0 .../{ => x64}/squeeze_transformation.cpp | 0 .../strided_slice_transformation.cpp | 0 .../subtract_multiply_to_multiply_add.cpp | 0 .../{ => x64}/subtract_transformation.cpp | 0 .../transpose_after_matmul_transformation.cpp | 0 .../{ => x64}/transpose_transformation.cpp | 0 .../{ => x64}/unsqueeze_transformation.cpp | 0 .../variadic_split_transformation.cpp | 0 .../skip_tests_config.cpp | 5 +- .../fully_connected_transformation.cpp | 31 +- .../fully_connected_transformation.hpp | 18 +- .../fully_connected_transformation.cpp | 55 ++- .../layer_transformation.hpp | 2 + .../layer_transformation.cpp | 24 +- .../include/ov_lpt_models/mat_mul.hpp | 14 +- .../ov_helpers/ov_lpt_models/src/mat_mul.cpp | 132 +++++-- 90 files changed, 1181 insertions(+), 478 deletions(-) create mode 
100644 src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp create mode 100644 src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp create mode 100644 src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp create mode 100644 src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.hpp create mode 100644 src/plugins/intel_cpu/src/nodes/executors/common/common_utils.hpp create mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/aarch64/fully_connected_transformation.cpp rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/add_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/assign_and_read_value_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/batch_to_space_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/clamp_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/concat_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/concat_with_child_and_output.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/concat_with_different_precision_on_children.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/concat_with_intermediate_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/concat_with_neighbors_transformation.cpp (100%) rename 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/concat_with_split_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/convolution_backprop_data_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/convolution_qdq_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/convolution_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/depth_to_space_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/elementwise_branch_selection_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/eliminate_fake_quantize_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fq_and_avg_pool_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fq_and_max_pool_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fq_and_two_output_branches_with_convolution.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fq_precision_selection_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fq_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fq_with_dq_not_optimal_transformation.cpp (100%) 
rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fully_connected_transformation.cpp (55%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fuse_convert_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fuse_dequantize_to_fq_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fuse_fq_and_scale_shift_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fuse_multiply_to_fq_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/fuse_subtract_to_fq_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/gather_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/gemm_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/group_convolution_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/groupconvolution_qdq_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/interpolate_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/mat_mul_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/mat_mul_with_constant_transformation.cpp (100%) rename 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/mat_mul_with_optimized_constant_fq.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/move_fake_quantize_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/multiply_to_group_convolution.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/multiply_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/multiply_with_one_parent.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/mvn_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/normalize_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/output_layers.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/output_layers_concat.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/output_layers_concat_multi_channel.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/pad_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/prelu_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/pull_reshape_through_dequantization.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => 
x64}/recurrent_cell_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/reduce_max_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/reduce_mean_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/reduce_min_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/reduce_sum_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/relu_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/reshape_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/shuffle_channels_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/space_to_batch_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/split_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/squeeze_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/strided_slice_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/subtract_multiply_to_multiply_add.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/subtract_transformation.cpp (100%) rename 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/transpose_after_matmul_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/transpose_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/unsqueeze_transformation.cpp (100%) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/{ => x64}/variadic_split_transformation.cpp (100%) diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp index 1aebfb111d7892..01bdadf59e35c8 100644 --- a/src/common/low_precision_transformations/src/network_helper.cpp +++ b/src/common/low_precision_transformations/src/network_helper.cpp @@ -1897,4 +1897,4 @@ bool NetworkHelper::checkConstantNotInf(const std::shared_ptr constant_nod } } // namespace low_precision } // namespace pass -} // namespace ov +} // namespace ov \ No newline at end of file diff --git a/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp b/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp index 908e36a51a7eb9..a0c16e24f04bad 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp @@ -24,7 +24,7 @@ ov::pass::ConvertFCToFCQuantizedLegacy::ConvertFCToFCQuantizedLegacy() { std::vector weights_types{ov::element::i8}; auto activations_m = pattern::any_input(ov::pass::pattern::type_matches_any(activation_types)); - auto weights_m = wrap_type(ov::pass::pattern::type_matches_any(weights_types)); + auto weights_m = pattern::any_input(); auto bias_m = pattern::any_input(); auto 
fully_connected_m = wrap_type({activations_m, weights_m, bias_m}); @@ -43,7 +43,8 @@ ov::pass::ConvertFCToFCQuantizedLegacy::ConvertFCToFCQuantizedLegacy() { const auto& fc_output_shape = fc_output.get_partial_shape(); const auto& multiply_output_shape = multiply.get_partial_shape(); - if (*fc_output_shape.rbegin() != *multiply_output_shape.rbegin()) { + if (*fc_output_shape.rbegin() != *multiply_output_shape.rbegin() || + !ov::op::util::is_on_constant_path(weights)) { return false; } diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp index 7d62e5cb6b673d..9b86a1433acb06 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp @@ -13,6 +13,7 @@ #include "cpu_types.h" #include "memory_desc/dnnl_blocked_memory_desc.h" +#include "nodes/executors/common/common_utils.hpp" #include "nodes/executors/memory_arguments.hpp" #include "openvino/core/type/element_type.hpp" #include "utils/cpu_utils.hpp" @@ -21,52 +22,6 @@ namespace ov { namespace intel_cpu { -static std::vector getDeQuantizedScales(const MemoryArgs& memory) { - if (!memory.count(ARG_DST_DEQ_SCALE)) - return {}; - - auto scalesMemory = memory.at(ARG_DST_DEQ_SCALE); - - auto scalesData = static_cast(scalesMemory->getData()); - - if (!scalesData) - return {}; - - auto dstShape = memory.at(ARG_DST)->getShape(); - auto dqScalesShape = scalesMemory->getShape(); - - auto scalesDims = getNormalizedDimsBySize(dqScalesShape.getDims(), dstShape.getDims().size()); - - auto scaleSize = std::accumulate(scalesDims.begin(), scalesDims.end(), std::size_t(1), std::multiplies()); - - std::vector DQScales(scaleSize, 1.0); - - OPENVINO_ASSERT(scaleSize == 1 || DQScales.size() == 1 || DQScales.size() == scaleSize, - "set invalid scales size , DQScales vector size: ", - DQScales.size(), - ", scale data size: ", - scaleSize); - - // @todo do we really need to broadcast dq scales and then resize them 
back? - if (scaleSize > DQScales.size()) - DQScales.resize(scaleSize, DQScales[0]); - if (1 == scaleSize) { - std::transform(DQScales.begin(), DQScales.end(), DQScales.begin(), [=](float val) { - return (scalesData[0] * val); - }); - } else { - for (size_t i = 0; i < DQScales.size(); i++) { - DQScales[i] *= scalesData[i]; - } - } - if (std::all_of(DQScales.begin(), DQScales.end(), [&](float val) { - return (val == DQScales[0]); - })) - DQScales.resize(1); - - return DQScales; -} - DnnlPostOpsComposer::DnnlPostOpsComposer(const PostOps& postOps, const dnnl::engine& engine, const VectorDims& outputDims, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.cpp index 4804c7b4efe252..23933d7e7563b3 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.cpp @@ -11,14 +11,13 @@ namespace ov { namespace intel_cpu { -static const std::unordered_map argConvert = { - {ARG_SRC_0, ACL_SRC_0}, - {ARG_SRC_1, ACL_SRC_1}, - {ARG_SRC_2, ACL_SRC_2}, - {ARG_BIAS, ACL_BIAS}, - {ARG_WEI, ACL_WEI}, - {ARG_DST, ACL_DST}, -}; +static const std::unordered_map argConvert = {{ARG_SRC_0, ACL_SRC_0}, + {ARG_SRC_1, ACL_SRC_1}, + {ARG_SRC_2, ACL_SRC_2}, + {ARG_BIAS, ACL_BIAS}, + {ARG_WEI, ACL_WEI}, + {ARG_DST, ACL_DST}, + {ARG_DST_DEQ_SCALE, ACL_DST_DEQ_SCALE}}; using ACLTypes = std::array; using ACLLayouts = std::array; @@ -39,9 +38,9 @@ static void initACLTensorParams(const MemoryPtr& memoryPtr, } } -static std::shared_ptr initTensorInfo(const arm_compute::TensorShape& tensorShape, - const arm_compute::DataType& dataType, - const arm_compute::DataLayout& dataLayout) { +std::shared_ptr ACLCommonExecutor::initTensorInfo(const arm_compute::TensorShape& tensorShape, + const arm_compute::DataType& dataType, + const arm_compute::DataLayout& dataLayout) { std::shared_ptr aclMemoryInfo = nullptr; if 
(dataType != arm_compute::DataType::UNKNOWN) { aclMemoryInfo = std::make_shared(tensorShape, 1, dataType, dataLayout); @@ -70,6 +69,9 @@ bool ACLCommonExecutor::update(const MemoryArgs& memory) { ACLTypes aclDataType{}; ACLLayouts aclDataLayout{}; for (auto& cpu_mem_ptr : memory) { + if (cpu_mem_ptr.second->getSize() == 0) { + continue; + } const ACLArgs index = argConvert.at(cpu_mem_ptr.first); initACLTensorParams(cpu_mem_ptr.second, aclTensorAttrs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.hpp index 94c5dbe219aae8..650fc5b8c2c7e8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.hpp @@ -11,7 +11,7 @@ namespace ov { namespace intel_cpu { -enum ACLArgs { ACL_SRC_0, ACL_SRC_1, ACL_SRC_2, ACL_BIAS, ACL_WEI, ACL_DST, COUNT_OF_ARGS }; +enum ACLArgs { ACL_SRC_0, ACL_SRC_1, ACL_SRC_2, ACL_BIAS, ACL_WEI, ACL_DST, ACL_DST_DEQ_SCALE, COUNT_OF_ARGS }; using ACLFunction = std::unique_ptr; using ACLShapes = std::array; @@ -42,6 +42,9 @@ class ACLCommonExecutor : public Executor { protected: ACLTensorAttrs aclTensorAttrs; + virtual std::shared_ptr initTensorInfo(const arm_compute::TensorShape& tensorShape, + const arm_compute::DataType& dataType, + const arm_compute::DataLayout& dataLayout); private: ACLTensors aclMemoryTensors; diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp index 74bdb97cdf2a8c..e4dbb1a3a37940 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp @@ -24,258 +24,6 @@ namespace ov { namespace intel_cpu { -static VectorDims makeDummyInputDims(const Shape& inShape, const Shape& wShape) { - const auto& weightDims = wShape.getStaticDims(); - - auto 
inMinDims = inShape.getMinDims(); - auto inMaxDims = inShape.getMaxDims(); - inMinDims.back() = weightDims.back(); - inMaxDims.back() = weightDims.back(); - - return MemoryDescUtils::makeDummyShape(Shape(inMinDims, inMaxDims)).getStaticDims(); -} - -static VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDims& wShape, const size_t out_rank) { - size_t activationRank = inShape.size(); - size_t channelRank = wShape.size() - 1; - // activation weight output_shape - // NCHW CoCHW NCo - // TNC CoC TNCo - // NC CoC NCo - VectorDims outputShape(out_rank, 1); - // set Co - outputShape.back() = wShape[0]; - // set batch dims - size_t batchRank = activationRank - channelRank; - size_t startIdx = out_rank - batchRank - 1; - for (size_t i = 0; i < batchRank; i++) { - outputShape[i + startIdx] = inShape[i]; - } - - return outputShape; -} - -static DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc) { - const auto& weiDesc = srcDesc->getDnnlDesc(); - const auto reorderedWeiDesc = - dnnl::memory::desc{weiDesc.get_dims(), weiDesc.get_data_type(), dnnl::memory::format_tag::ba}; - const auto transposedWeiDesc = reorderedWeiDesc.reshape(dstDesc->getDnnlDesc().get_dims()); - - return DnnlExtensionUtils::makeDescriptor(transposedWeiDesc); -} - -static ov::optional convertWeightPrecision(MemoryPtr input, - MemoryPtr output, - ov::element::Type weightPrecision) { - MemoryArgs memoryArgs; - memoryArgs[ARG_SRC] = input; - memoryArgs[ARG_DST] = output; - - auto aclWeightsConverter = std::make_shared(); - if (aclWeightsConverter->update(memoryArgs)) { - aclWeightsConverter->execute(memoryArgs); - return ov::optional(memoryArgs.at(ARG_DST)); - } - - if (!node::Convert::isSupportedDesc(input->getDesc()) || !node::Convert::isSupportedDesc(output->getDesc())) { - return {}; - } - - auto data = static_cast(input->getData()); - std::vector tmpBuff; - tmpBuff.resize(output->getSize()); - cpu_convert(data, - 
tmpBuff.data(), - DnnlExtensionUtils::DataTypeToElementType(input->getDataType()), - weightPrecision, - input->getSize() / input->getDesc().getPrecision().size()); - - return ov::optional(std::make_shared(output->getPrimitive().get_engine(), - output->getDesc().cloneWithNewPrecision(weightPrecision), - tmpBuff.data())); -} - -static ov::optional reorderDataFallback(MemoryPtr input, MemoryPtr output, ExecutorContext::CPtr context) { - if (output->getDataType() == input->getDataType()) { - return {}; - } - const auto inPrc = DnnlExtensionUtils::DataTypeToElementType(input->getDataType()); - auto convertedDstMemoryDesc = output->getDesc().cloneWithNewPrecision(inPrc); - dnnl::reorder reorderWithoutConvert = - getReorderPrim(context->getRuntimeCache(), - output->getPrimitive().get_engine(), - input->getPrimitive().get_desc(), - MemoryDescUtils::convertToDnnlMemoryDesc(convertedDstMemoryDesc)->getDnnlDesc()); - - if (reorderWithoutConvert && - parse_impl_name(reorderWithoutConvert.get_primitive_desc()->impl()->name()) != ref_any) { - auto convertOutput = convertWeightPrecision(input, output, inPrc); - if (!convertOutput) { - return {}; - } - input = *convertOutput; - - if (reorderWithoutConvert) { - dnnl::stream loc_stream(output->getPrimitive().get_engine(), dnnl::stream::flags::in_order); - reorderWithoutConvert.execute( - loc_stream, - {{DNNL_ARG_FROM, input->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}}); - return ov::optional(output); - } - } - return {}; -} - -static MemoryPtr reorderData(DnnlMemoryDescPtr srcWeightDesc, - DnnlMemoryDescPtr dstWeightDesc, - MemoryCPtr weightsMem, - ExecutorContext::CPtr context) { - MemoryPtr input = std::make_shared(context->getEngine(), srcWeightDesc, weightsMem->getData()); - MemoryPtr output = std::make_shared(context->getEngine(), dstWeightDesc); - if (!input->getDesc().isDefined() || !output->getDesc().isDefined()) - OPENVINO_THROW("Can't reorder data with dynamic shapes"); - - if (input->getShape().hasZeroDims() 
|| output->getShape().hasZeroDims()) { - return output; - } - - if (input->getDesc().isCompatible(output->getDesc())) { - auto srcPtr = static_cast(input->getData()); - auto dstPtr = static_cast(output->getData()); - auto copySize = output->getSize(); - cpu_memcpy(dstPtr, srcPtr, copySize); - return output; - } - - // try directly reorder - auto engine = output->getPrimitive().get_engine(); - dnnl::reorder directReorder = getReorderPrim(context->getRuntimeCache(), - engine, - input->getPrimitive().get_desc(), - output->getPrimitive().get_desc()); - - if (!directReorder || parse_impl_name(directReorder.get_primitive_desc()->impl()->name()) == ref_any) { - // try precision conversion then do the reorder - auto fallbackOutput = reorderDataFallback(input, output, context); - if (fallbackOutput) { - return *fallbackOutput; - } - } - // if precision conversion does not work then do direct reference reorder - if (directReorder) { - dnnl::stream loc_stream(engine, dnnl::stream::flags::in_order); - directReorder.execute(loc_stream, - {{DNNL_ARG_FROM, input->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}}); - } else { - OPENVINO_THROW("Could not make onednn reorder."); - } - return output; -} - -static MemoryPtr reorderWeights(const MemoryArgs& memory, - const ExecutorContext::CPtr context, - ACLFCAttrs& aclfcAttrs, - DnnlMemoryDescPtr dnnlSrcDesc, - DnnlMemoryDescPtr dnnlDstDesc) { - auto create = [&]() { - MemoryPtr weightsMemory = memory.at(ARG_WEI); - if (aclfcAttrs.isWeightsRepacked || aclfcAttrs.isConvertedWeights) { - weightsMemory = reorderData(dnnlSrcDesc, dnnlDstDesc, memory.at(ARG_WEI), context); - DEBUG_LOG("ACLFullyConnectedExecutor: cache miss, perform packing"); - } - return weightsMemory; - }; - - auto weightCache = context->getWeightsCache(); - if (weightCache != nullptr) { - const auto& wgtDims = memory.at(ARG_WEI)->getStaticDims(); - const auto N = wgtDims[0]; - const auto K = wgtDims[1]; - std::string format = "fc_acl_" + std::to_string(N) + "_" + 
std::to_string(K); - const std::string string_hash = format + "_" + std::to_string(memory.at(ARG_WEI)->getSize()) + "_" + - std::to_string(reinterpret_cast(memory.at(ARG_WEI)->getData())); - DEBUG_LOG("ACLFullyConnectedExecutor: findOrCreate, string_hash: ", string_hash); - return *weightCache->findOrCreate(string_hash, create); - } - - DEBUG_LOG("ACLFullyConnectedExecutor: Weights cache is not available"); - return create(); -} - -static MemoryPtr prepareWeightMemory(const MemoryArgs& memory, - const ExecutorContext::CPtr context, - const FCAttrs& attrs, - ACLFCAttrs& aclfcAttrs, - const PostOps& postOps, - arm_compute::WeightFormat& expectedWeightFormat, - arm_compute::TensorInfo& weiTensorInfo) { - MemoryArgs memoryArgs; - memoryArgs[ARG_BIAS] = memory.at(ARG_BIAS); - memoryArgs[ARG_WEI] = memory.at(ARG_WEI); - - auto originalWeightsDesc = memory.at(ARG_WEI)->getDescPtr(); - - // normalize weights to 2D - const auto& wgtDims = originalWeightsDesc->getShape().getStaticDims(); - const VectorDims wgtDims2D = reshapeDownToRank<2>(wgtDims); - - originalWeightsDesc = std::make_shared(originalWeightsDesc->getPrecision(), Shape{wgtDims2D}); - - auto dnnlSrcDesc = MemoryDescUtils::convertToDnnlMemoryDesc(originalWeightsDesc); - auto dstDesc = originalWeightsDesc->cloneWithNewPrecision(aclfcAttrs.inputPrecision); - auto dnnlDstDesc = MemoryDescUtils::convertToDnnlMemoryDesc(dstDesc); - - if (memory.at(ARG_SRC_0)->getShape().isDynamic()) { - const auto& inShape = memory.at(ARG_SRC_0)->getShape(); - const auto& wShape = originalWeightsDesc->getShape(); - const auto& inDymmyDims = makeDummyInputDims(inShape, wShape); - const auto& outDymmyDims = - makeDummyOutputDims(inDymmyDims, wShape.getStaticDims(), memory.at(ARG_DST)->getShape().getRank()); - memoryArgs[ARG_SRC_0] = - std::make_shared(context->getEngine(), - memory.at(ARG_SRC_0)->getDescPtr()->cloneWithNewDims(inDymmyDims)); - memoryArgs[ARG_DST] = - std::make_shared(context->getEngine(), - 
memory.at(ARG_DST)->getDescPtr()->cloneWithNewDims(outDymmyDims)); - } else { - memoryArgs[ARG_SRC_0] = memory.at(ARG_SRC_0); - memoryArgs[ARG_DST] = memory.at(ARG_DST); - } - - // TODO: ACLWeightFormatGenerator should be replaced with Reorder executor - // that calls ACL NEReorder + NETranspose or dnnl::reorder depending on backend availability - auto aclWeightsRepack = std::make_shared(attrs, postOps, memoryArgs); - bool isNeededReorder = aclWeightsRepack->update(memoryArgs); - expectedWeightFormat = - isNeededReorder ? aclWeightsRepack->getOptImplWeightFormat() : arm_compute::WeightFormat::UNSPECIFIED; - weiTensorInfo = aclWeightsRepack->getTensorInfo(ACLArgs::ACL_WEI); - - if (isNeededReorder) { - dnnl::impl::dim_t o_dim = 0; - dnnl::impl::dim_t inner_dim = 1; - std::vector remaining_dims = {}; - auto weights_md_ = dnnlDstDesc->getDnnlDesc().get(); - dnnl::impl::cpu::acl::acl_utils::reorder_to_weight_format(weiTensorInfo, - *weights_md_, - expectedWeightFormat, - inner_dim, - o_dim, - remaining_dims, - {}); - if (aclfcAttrs.weightsNonTransposed) { - dnnlSrcDesc = makeTransposedWeightDescriptor(dnnlSrcDesc, dnnlDstDesc); - } - aclfcAttrs.isWeightsRepacked = true; - return reorderWeights(memory, context, aclfcAttrs, dnnlSrcDesc, dnnlDstDesc); - } - if (!aclfcAttrs.weightsNonTransposed) { - dnnlDstDesc = makeTransposedWeightDescriptor(dnnlDstDesc, dnnlSrcDesc); - aclfcAttrs.isWeightsRepacked = true; - } - return reorderWeights(memory, context, aclfcAttrs, dnnlSrcDesc, dnnlDstDesc); -} - static bool checkPostOps(const PostOps& postOps) { if (postOps.empty()) { return true; @@ -321,8 +69,13 @@ ACLFullyConnectedExecutor::ACLFullyConnectedExecutor(const FCAttrs& attrs, const MemoryArgs& memory, const ExecutorContext::CPtr context) { initFCAttrs(attrs, aclTensorAttrs, aclfcAttrs, memory, fullyConnectedLayerInfo, postOps); - packedWeights = - prepareWeightMemory(memory, context, attrs, aclfcAttrs, postOps, expectedWeightFormat, weiTensorInfo); + packedWeights = 
acl_fc_executor::prepareWeightMemory(memory, + context, + attrs, + aclfcAttrs, + postOps, + expectedWeightFormat, + weiTensorInfo); } bool ACLFullyConnectedExecutor::supports(const FCConfig& config) { @@ -335,20 +88,8 @@ bool ACLFullyConnectedExecutor::supports(const FCConfig& config) { return true; } -static arm_compute::TensorShape normalizeDimsTo2D(const arm_compute::TensorShape shape) { - size_t norm_dim = std::accumulate(shape.begin() + 1, shape.end(), 1, std::multiplies()); - return arm_compute::TensorShape(shape[0], norm_dim); -} - -static void updateFCTensorsShapes(ACLShapes& aclMemoryShapes) { - aclMemoryShapes[ACLArgs::ACL_WEI] = normalizeDimsTo2D(aclMemoryShapes[ACLArgs::ACL_WEI]); - aclMemoryShapes[ACLArgs::ACL_SRC_0] = normalizeDimsTo2D(aclMemoryShapes[ACLArgs::ACL_SRC_0]); - aclMemoryShapes[ACLArgs::ACL_DST] = normalizeDimsTo2D(aclMemoryShapes[ACLArgs::ACL_DST]); - std::swap(aclMemoryShapes[ACLArgs::ACL_WEI][0], aclMemoryShapes[ACLArgs::ACL_WEI][1]); -} - void ACLFullyConnectedExecutor::updateTensorsShapes(ACLShapes& aclMemoryShapes) { - updateFCTensorsShapes(aclMemoryShapes); + acl_fc_executor::updateFCTensorsShapes(aclMemoryShapes); } arm_compute::Status ACLFullyConnectedExecutor::validateTensorsInfo(const ACLInfos& aclMemoryInfos) { @@ -387,48 +128,5 @@ ACLFunction ACLFullyConnectedExecutor::configureFunction(const ACLTensors& aclMe return neFC; } -arm_compute::Status acl_fc_executor::ACLWeightsConverter::validateTensorsInfo(const ACLInfos& aclMemoryInfos) { - return arm_compute::NECast::validate(aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), - aclMemoryInfos[ACLArgs::ACL_DST].get(), - arm_compute::ConvertPolicy::SATURATE); -} - -ACLFunction acl_fc_executor::ACLWeightsConverter::configureFunction(const ACLTensors& aclMemoryTensors) { - auto neCast = std::make_unique(); - neCast->configure(aclMemoryTensors[ACLArgs::ACL_SRC_0].get(), - aclMemoryTensors[ACLArgs::ACL_DST].get(), - arm_compute::ConvertPolicy::SATURATE); - return neCast; -} - 
-acl_fc_executor::ACLWeightFormatGenerator::ACLWeightFormatGenerator(const FCAttrs& attrs, - const PostOps& postOps, - const MemoryArgs& memory) { - initFCAttrs(attrs, aclTensorAttrs, aclfcAttrs, memory, fullyConnectedLayerInfo, postOps); -} - -void acl_fc_executor::ACLWeightFormatGenerator::updateTensorsShapes(ACLShapes& aclMemoryShapes) { - updateFCTensorsShapes(aclMemoryShapes); -} - -arm_compute::Status acl_fc_executor::ACLWeightFormatGenerator::validateTensorsInfo(const ACLInfos& aclMemoryInfos) { - if (aclfcAttrs.isConvertedWeights) { - aclMemoryInfos[ACLArgs::ACL_WEI]->set_data_type(aclMemoryInfos[ACLArgs::ACL_SRC_0]->data_type()); - } - int icTotal = aclMemoryInfos[ACLArgs::ACL_SRC_0]->dimension(0); - return arm_compute::NEFullyConnectedLayer::has_opt_impl( - expectedWeightFormat, - aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), - aclMemoryInfos[ACLArgs::ACL_WEI].get(), - aclMemoryInfos[ACLArgs::ACL_BIAS].get(), - aclMemoryInfos[ACLArgs::ACL_DST].get(), - fullyConnectedLayerInfo, - arm_compute::WeightsInfo(false, 1, 1, icTotal, false, arm_compute::WeightFormat::ANY)); -} - -ACLFunction acl_fc_executor::ACLWeightFormatGenerator::configureFunction(const ACLTensors& aclMemoryTensors) { - return std::make_unique(); -} - } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp index afeb4a5ce45c95..6c1a2f0576e283 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp @@ -1,50 +1,16 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once #include "acl_common_executor.hpp" +#include "acl_fullyconnected_utils.hpp" #include "nodes/executors/fullyconnected_config.hpp" namespace ov { namespace intel_cpu { -struct ACLFCAttrs { - ov::element::Type 
inputPrecision; - bool isConvertedWeights = false; - bool isWeightsRepacked = false; - bool weightsNonTransposed; -}; - -namespace acl_fc_executor { - -class ACLWeightsConverter : public ACLCommonExecutor { -public: - ACLWeightsConverter() = default; - void updateTensorsShapes(ACLShapes& aclMemoryShapes) override {} - arm_compute::Status validateTensorsInfo(const ACLInfos& aclMemoryInfos) override; - ACLFunction configureFunction(const ACLTensors& aclMemoryTensors) override; -}; - -class ACLWeightFormatGenerator : public ACLCommonExecutor { -public: - ACLWeightFormatGenerator(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory); - void updateTensorsShapes(ACLShapes& aclMemoryShapes) override; - arm_compute::Status validateTensorsInfo(const ACLInfos& aclMemoryInfos) override; - ACLFunction configureFunction(const ACLTensors& aclMemoryTensors) override; - arm_compute::WeightFormat getOptImplWeightFormat() { - return expectedWeightFormat; - } - -private: - arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo; - ACLFCAttrs aclfcAttrs; - arm_compute::WeightFormat expectedWeightFormat; -}; - -} // namespace acl_fc_executor - class ACLFullyConnectedExecutor : public ACLCommonExecutor { public: ACLFullyConnectedExecutor(const FCAttrs& attrs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp new file mode 100644 index 00000000000000..0c3e208381497f --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp @@ -0,0 +1,367 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include + +#include "acl_fullyconnected.hpp" +#include "acl_utils.hpp" +#include "memory_desc/cpu_memory_desc_utils.h" +#include "nodes/common/cpu_convert.h" +#include "nodes/common/cpu_memcpy.h" +#include "nodes/common/reorder_prim.h" +#include "nodes/convert.h" +#include 
"nodes/executors/executor.hpp" +#include "nodes/executors/memory_arguments.hpp" +#include "utils/cpu_utils.hpp" +#include "utils/debug_capabilities.h" + +namespace ov { +namespace intel_cpu { + +VectorDims acl_fc_executor::makeDummyInputDims(const Shape& inShape, const Shape& wShape) { + const auto& weightDims = wShape.getStaticDims(); + + auto inMinDims = inShape.getMinDims(); + auto inMaxDims = inShape.getMaxDims(); + inMinDims.back() = weightDims.back(); + inMaxDims.back() = weightDims.back(); + + return MemoryDescUtils::makeDummyShape(Shape(inMinDims, inMaxDims)).getStaticDims(); +} + +VectorDims acl_fc_executor::makeDummyOutputDims(const VectorDims& inShape, + const VectorDims& wShape, + const size_t out_rank) { + size_t activationRank = inShape.size(); + size_t channelRank = wShape.size() - 1; + // activation weight output_shape + // NCHW CoCHW NCo + // TNC CoC TNCo + // NC CoC NCo + VectorDims outputShape(out_rank, 1); + // set Co + outputShape.back() = wShape[0]; + // set batch dims + size_t batchRank = activationRank - channelRank; + size_t startIdx = out_rank - batchRank - 1; + for (size_t i = 0; i < batchRank; i++) { + outputShape[i + startIdx] = inShape[i]; + } + + return outputShape; +} + +DnnlMemoryDescPtr acl_fc_executor::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, + const DnnlMemoryDescPtr dstDesc) { + const auto& weiDesc = srcDesc->getDnnlDesc(); + dnnl::memory::dims wgtDims2D = reshapeDownToRank<2>(weiDesc.get_dims()); + const auto reorderedWeiDesc = dnnl::memory::desc{wgtDims2D, weiDesc.get_data_type(), dnnl::memory::format_tag::ba}; + const auto transposedWeiDesc = reorderedWeiDesc.reshape(dstDesc->getDnnlDesc().get_dims()); + + return DnnlExtensionUtils::makeDescriptor(transposedWeiDesc); +} + +ov::optional acl_fc_executor::convertWeightPrecision(MemoryPtr input, + MemoryPtr output, + ov::element::Type weightPrecision) { + MemoryArgs memoryArgs; + memoryArgs[ARG_SRC] = input; + memoryArgs[ARG_DST] = output; + + auto 
aclWeightsConverter = std::make_shared(); + if (aclWeightsConverter->update(memoryArgs)) { + aclWeightsConverter->execute(memoryArgs); + return ov::optional(memoryArgs.at(ARG_DST)); + } + + if (!node::Convert::isSupportedDesc(input->getDesc()) || !node::Convert::isSupportedDesc(output->getDesc())) { + return {}; + } + + auto data = static_cast(input->getData()); + std::vector tmpBuff; + tmpBuff.resize(output->getSize()); + cpu_convert(data, + tmpBuff.data(), + DnnlExtensionUtils::DataTypeToElementType(input->getDataType()), + weightPrecision, + input->getSize() / input->getDesc().getPrecision().size()); + + return ov::optional(std::make_shared(output->getPrimitive().get_engine(), + output->getDesc().cloneWithNewPrecision(weightPrecision), + tmpBuff.data())); +} + +ov::optional acl_fc_executor::reorderDataFallback(MemoryPtr input, + MemoryPtr output, + ExecutorContext::CPtr context) { + if (output->getDataType() == input->getDataType()) { + return {}; + } + const auto inPrc = DnnlExtensionUtils::DataTypeToElementType(input->getDataType()); + auto convertedDstMemoryDesc = output->getDesc().cloneWithNewPrecision(inPrc); + dnnl::reorder reorderWithoutConvert = + getReorderPrim(context->getRuntimeCache(), + output->getPrimitive().get_engine(), + input->getPrimitive().get_desc(), + MemoryDescUtils::convertToDnnlMemoryDesc(convertedDstMemoryDesc)->getDnnlDesc()); + + if (reorderWithoutConvert && + parse_impl_name(reorderWithoutConvert.get_primitive_desc()->impl()->name()) != ref_any) { + auto convertOutput = convertWeightPrecision(input, output, inPrc); + if (!convertOutput) { + return {}; + } + input = *convertOutput; + + if (reorderWithoutConvert) { + dnnl::stream loc_stream(output->getPrimitive().get_engine(), dnnl::stream::flags::in_order); + reorderWithoutConvert.execute( + loc_stream, + {{DNNL_ARG_FROM, input->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}}); + return ov::optional(output); + } + } + return {}; +} + +MemoryPtr 
acl_fc_executor::reorderData(DnnlMemoryDescPtr srcWeightDesc, + DnnlMemoryDescPtr dstWeightDesc, + MemoryCPtr weightsMem, + ExecutorContext::CPtr context) { + MemoryPtr input = std::make_shared(context->getEngine(), srcWeightDesc, weightsMem->getData()); + MemoryPtr output = std::make_shared(context->getEngine(), dstWeightDesc); + if (!input->getDesc().isDefined() || !output->getDesc().isDefined()) + OPENVINO_THROW("Can't reorder data with dynamic shapes"); + + if (input->getShape().hasZeroDims() || output->getShape().hasZeroDims()) { + return output; + } + + if (input->getDesc().isCompatible(output->getDesc())) { + auto srcPtr = static_cast(input->getData()); + auto dstPtr = static_cast(output->getData()); + auto copySize = output->getSize(); + cpu_memcpy(dstPtr, srcPtr, copySize); + return output; + } + + // try directly reorder + auto engine = output->getPrimitive().get_engine(); + dnnl::reorder directReorder = getReorderPrim(context->getRuntimeCache(), + engine, + input->getPrimitive().get_desc(), + output->getPrimitive().get_desc()); + + if (!directReorder || parse_impl_name(directReorder.get_primitive_desc()->impl()->name()) == ref_any) { + // try precision conversion then do the reorder + auto fallbackOutput = reorderDataFallback(input, output, context); + if (fallbackOutput) { + return *fallbackOutput; + } + } + // if precision conversion does not work then do direct reference reorder + if (directReorder) { + dnnl::stream loc_stream(engine, dnnl::stream::flags::in_order); + directReorder.execute(loc_stream, + {{DNNL_ARG_FROM, input->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}}); + } else { + OPENVINO_THROW("Could not make onednn reorder."); + } + return output; +} + +MemoryPtr acl_fc_executor::reorderWeights(const MemoryArgs& memory, + const ExecutorContext::CPtr context, + ACLFCAttrs& aclfcAttrs, + DnnlMemoryDescPtr dnnlSrcDesc, + DnnlMemoryDescPtr dnnlDstDesc) { + auto create = [&]() { + MemoryPtr weightsMemory = memory.at(ARG_WEI); + if 
(aclfcAttrs.isWeightsRepacked || aclfcAttrs.isConvertedWeights) { + weightsMemory = reorderData(dnnlSrcDesc, dnnlDstDesc, memory.at(ARG_WEI), context); + DEBUG_LOG("ACLFullyConnectedExecutor: cache miss, perform packing"); + } + return weightsMemory; + }; + + auto weightCache = context->getWeightsCache(); + if (weightCache != nullptr) { + const auto& wgtDims = memory.at(ARG_WEI)->getStaticDims(); + const auto N = wgtDims[0]; + const auto K = wgtDims[1]; + std::string format = "fc_acl_" + std::to_string(N) + "_" + std::to_string(K); + const std::string string_hash = format + "_" + std::to_string(memory.at(ARG_WEI)->getSize()) + "_" + + std::to_string(reinterpret_cast(memory.at(ARG_WEI)->getData())); + DEBUG_LOG("ACLFullyConnectedExecutor: findOrCreate, string_hash: ", string_hash); + return *weightCache->findOrCreate(string_hash, create); + } + + DEBUG_LOG("ACLFullyConnectedExecutor: Weights cache is not available"); + return create(); +} + +MemoryPtr acl_fc_executor::prepareWeightMemory(const MemoryArgs& memory, + const ExecutorContext::CPtr context, + const FCAttrs& attrs, + ACLFCAttrs& aclfcAttrs, + const PostOps& postOps, + arm_compute::WeightFormat& expectedWeightFormat, + arm_compute::TensorInfo& weiTensorInfo) { + MemoryArgs memoryArgs; + memoryArgs[ARG_BIAS] = memory.at(ARG_BIAS); + memoryArgs[ARG_WEI] = memory.at(ARG_WEI); + + auto originalWeightsDesc = memory.at(ARG_WEI)->getDescPtr(); + // normalize weights to 2D + const auto& wgtDims = originalWeightsDesc->getShape().getStaticDims(); + const VectorDims wgtDims2D = reshapeDownToRank<2>(wgtDims); + originalWeightsDesc = std::make_shared(originalWeightsDesc->getPrecision(), Shape{wgtDims2D}); + auto dnnlSrcDesc = MemoryDescUtils::convertToDnnlMemoryDesc(originalWeightsDesc); + auto dstDesc = originalWeightsDesc->cloneWithNewPrecision(aclfcAttrs.inputPrecision); + auto dnnlDstDesc = MemoryDescUtils::convertToDnnlMemoryDesc(dstDesc); + + if (memory.at(ARG_SRC_0)->getShape().isDynamic()) { + const auto& 
inShape = memory.at(ARG_SRC_0)->getShape(); + const auto& wShape = originalWeightsDesc->getShape(); + const auto& inDymmyDims = makeDummyInputDims(inShape, wShape); + const auto& outDymmyDims = + makeDummyOutputDims(inDymmyDims, wShape.getStaticDims(), memory.at(ARG_DST)->getShape().getRank()); + memoryArgs[ARG_SRC_0] = + std::make_shared(context->getEngine(), + memory.at(ARG_SRC_0)->getDescPtr()->cloneWithNewDims(inDymmyDims)); + memoryArgs[ARG_DST] = + std::make_shared(context->getEngine(), + memory.at(ARG_DST)->getDescPtr()->cloneWithNewDims(outDymmyDims)); + } else { + memoryArgs[ARG_SRC_0] = memory.at(ARG_SRC_0); + memoryArgs[ARG_DST] = memory.at(ARG_DST); + } + // TODO: ACLWeightFormatGenerator should be replaced with Reorder executor + // that calls ACL NEReorder + NETranspose or dnnl::reorder depending on backend availability + auto aclWeightsRepack = std::make_shared(attrs, postOps, memoryArgs); + bool isNeededReorder = aclWeightsRepack->update(memoryArgs); + expectedWeightFormat = + isNeededReorder ? 
aclWeightsRepack->getOptImplWeightFormat() : arm_compute::WeightFormat::UNSPECIFIED; + weiTensorInfo = aclWeightsRepack->getTensorInfo(ACLArgs::ACL_WEI); + + if (isNeededReorder) { + dnnl::impl::dim_t o_dim = 0; + dnnl::impl::dim_t inner_dim = 1; + std::vector remaining_dims = {}; + auto weights_md_ = dnnlDstDesc->getDnnlDesc().get(); + dnnl::impl::cpu::acl::acl_utils::reorder_to_weight_format(weiTensorInfo, + *weights_md_, + expectedWeightFormat, + inner_dim, + o_dim, + remaining_dims, + {}); + if (aclfcAttrs.weightsNonTransposed) { + dnnlSrcDesc = makeTransposedWeightDescriptor(dnnlSrcDesc, dnnlDstDesc); + } + aclfcAttrs.isWeightsRepacked = true; + return reorderWeights(memory, context, aclfcAttrs, dnnlSrcDesc, dnnlDstDesc); + } + if (!aclfcAttrs.weightsNonTransposed) { + dnnlDstDesc = makeTransposedWeightDescriptor(dnnlDstDesc, dnnlSrcDesc); + aclfcAttrs.isWeightsRepacked = true; + } + return reorderWeights(memory, context, aclfcAttrs, dnnlSrcDesc, dnnlDstDesc); +} + +static bool checkPostOps(const PostOps& postOps) { + // Add postops + if (!postOps.empty() && postOps.size() == 1) { + if (const auto activation = std::dynamic_pointer_cast(postOps[0])) { + if (checkActivationLayerInfo(convertToEltwiseAlgorithm(activation->type()))) { + return true; + } + } + } + return false; +} + +static void initFCAttrs(const FCAttrs& attrs, + ACLTensorAttrs& aclTensorAttrs, + ACLFCAttrs& aclfcAttrs, + const MemoryArgs& memory, + arm_compute::FullyConnectedLayerInfo& fullyConnectedLayerInfo, + const PostOps& postOps) { + aclTensorAttrs.hasLayoutTypeNHWC = memory.at(ARG_SRC)->getDescPtr()->hasLayoutType(LayoutType::nspc); + fullyConnectedLayerInfo.weights_trained_layout = getAclDataLayoutByMemoryDesc(memory.at(ARG_WEI)->getDescPtr()); + aclfcAttrs.inputPrecision = memory.at(ARG_SRC)->getDescPtr()->getPrecision(); + fullyConnectedLayerInfo.transpose_weights = false; + aclfcAttrs.weightsNonTransposed = attrs.weightsNonTransposed; + + if (checkPostOps(postOps)) { + auto activation = 
std::dynamic_pointer_cast(postOps[0]); + fullyConnectedLayerInfo.activation_info = getActivationLayerInfo(convertToEltwiseAlgorithm(activation->type()), + activation->alpha(), + activation->beta(), + activation->gamma()); + } + + if (memory.at(ARG_SRC)->getPrecision() != memory.at(ARG_WEI)->getPrecision()) { + aclfcAttrs.isConvertedWeights = true; + } +} + +arm_compute::TensorShape acl_fc_executor::normalizeDimsTo2D(const arm_compute::TensorShape shape) { + size_t norm_dim = std::accumulate(shape.begin() + 1, shape.end(), 1, std::multiplies()); + return arm_compute::TensorShape(shape[0], norm_dim); +} + +void acl_fc_executor::updateFCTensorsShapes(ACLShapes& aclMemoryShapes) { + aclMemoryShapes[ACLArgs::ACL_WEI] = normalizeDimsTo2D(aclMemoryShapes[ACLArgs::ACL_WEI]); + aclMemoryShapes[ACLArgs::ACL_SRC_0] = normalizeDimsTo2D(aclMemoryShapes[ACLArgs::ACL_SRC_0]); + aclMemoryShapes[ACLArgs::ACL_DST] = normalizeDimsTo2D(aclMemoryShapes[ACLArgs::ACL_DST]); + std::swap(aclMemoryShapes[ACLArgs::ACL_WEI][0], aclMemoryShapes[ACLArgs::ACL_WEI][1]); +} + +arm_compute::Status acl_fc_executor::ACLWeightsConverter::validateTensorsInfo(const ACLInfos& aclMemoryInfos) { + return arm_compute::NECast::validate(aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), + aclMemoryInfos[ACLArgs::ACL_DST].get(), + arm_compute::ConvertPolicy::SATURATE); +} + +ACLFunction acl_fc_executor::ACLWeightsConverter::configureFunction(const ACLTensors& aclMemoryTensors) { + auto neCast = std::make_unique(); + neCast->configure(aclMemoryTensors[ACLArgs::ACL_SRC_0].get(), + aclMemoryTensors[ACLArgs::ACL_DST].get(), + arm_compute::ConvertPolicy::SATURATE); + return neCast; +} + +acl_fc_executor::ACLWeightFormatGenerator::ACLWeightFormatGenerator(const FCAttrs& attrs, + const PostOps& postOps, + const MemoryArgs& memory) { + initFCAttrs(attrs, aclTensorAttrs, aclfcAttrs, memory, fullyConnectedLayerInfo, postOps); +} + +void acl_fc_executor::ACLWeightFormatGenerator::updateTensorsShapes(ACLShapes& aclMemoryShapes) { 
+ updateFCTensorsShapes(aclMemoryShapes); +} + +arm_compute::Status acl_fc_executor::ACLWeightFormatGenerator::validateTensorsInfo(const ACLInfos& aclMemoryInfos) { + if (aclfcAttrs.isConvertedWeights) { + aclMemoryInfos[ACLArgs::ACL_WEI]->set_data_type(aclMemoryInfos[ACLArgs::ACL_SRC_0]->data_type()); + } + int icTotal = aclMemoryInfos[ACLArgs::ACL_SRC_0]->dimension(0); + return arm_compute::NEFullyConnectedLayer::has_opt_impl( + expectedWeightFormat, + aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), + aclMemoryInfos[ACLArgs::ACL_WEI].get(), + aclMemoryInfos[ACLArgs::ACL_BIAS].get(), + aclMemoryInfos[ACLArgs::ACL_DST].get(), + fullyConnectedLayerInfo, + arm_compute::WeightsInfo(false, 1, 1, icTotal, false, arm_compute::WeightFormat::ANY)); +} + +ACLFunction acl_fc_executor::ACLWeightFormatGenerator::configureFunction(const ACLTensors& aclMemoryTensors) { + return std::make_unique(); +} + +} // namespace intel_cpu +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp new file mode 100644 index 00000000000000..686042f6067433 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp @@ -0,0 +1,81 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once +#include "acl_common_executor.hpp" +#include "nodes/executors/fullyconnected_config.hpp" +#include "ov_optional.hpp" + +namespace ov { +namespace intel_cpu { + +struct ACLFCAttrs { + ov::element::Type inputPrecision; + bool isConvertedWeights = false; + bool isWeightsRepacked = false; + bool weightsNonTransposed; +}; + +namespace acl_fc_executor { + +VectorDims makeDummyInputDims(const Shape& inShape, const Shape& wShape); + +VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDims& wShape, const size_t out_rank); + +DnnlMemoryDescPtr makeTransposedWeightDescriptor(const 
DnnlMemoryDescPtr srcDesc, const DnnlMemoryDescPtr dstDesc); + +ov::optional convertWeightPrecision(MemoryPtr input, MemoryPtr output, ov::element::Type weightPrecision); + +ov::optional reorderDataFallback(MemoryPtr input, MemoryPtr output, ExecutorContext::CPtr context); + +MemoryPtr reorderData(DnnlMemoryDescPtr srcWeightDesc, + DnnlMemoryDescPtr dstWeightDesc, + MemoryCPtr weightsMem, + ExecutorContext::CPtr context); + +MemoryPtr reorderWeights(const MemoryArgs& memory, + const ExecutorContext::CPtr context, + ACLFCAttrs& aclfcAttrs, + DnnlMemoryDescPtr dnnlSrcDesc, + DnnlMemoryDescPtr dnnlDstDesc); + +MemoryPtr prepareWeightMemory(const MemoryArgs& memory, + const ExecutorContext::CPtr context, + const FCAttrs& attrs, + ACLFCAttrs& aclfcAttrs, + const PostOps& postOps, + arm_compute::WeightFormat& expectedWeightFormat, + arm_compute::TensorInfo& weiTensorInfo); + +arm_compute::TensorShape normalizeDimsTo2D(const arm_compute::TensorShape shape); + +void updateFCTensorsShapes(ACLShapes& aclMemoryShapes); + +class ACLWeightsConverter : public ACLCommonExecutor { +public: + ACLWeightsConverter() = default; + void updateTensorsShapes(ACLShapes& aclMemoryShapes) override {} + arm_compute::Status validateTensorsInfo(const ACLInfos& aclMemoryInfos) override; + ACLFunction configureFunction(const ACLTensors& aclMemoryTensors) override; +}; + +class ACLWeightFormatGenerator : public ACLCommonExecutor { +public: + ACLWeightFormatGenerator(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory); + void updateTensorsShapes(ACLShapes& aclMemoryShapes) override; + arm_compute::Status validateTensorsInfo(const ACLInfos& aclMemoryInfos) override; + ACLFunction configureFunction(const ACLTensors& aclMemoryTensors) override; + arm_compute::WeightFormat getOptImplWeightFormat() { + return expectedWeightFormat; + } + +private: + arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo; + arm_compute::WeightsInfo weightsInfo; + ACLFCAttrs aclfcAttrs; + 
arm_compute::WeightFormat expectedWeightFormat; +}; + +} // namespace acl_fc_executor +} // namespace intel_cpu +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp new file mode 100644 index 00000000000000..1604c4fff2f585 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp @@ -0,0 +1,151 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "acl_lowp_fullyconnected.hpp" + +#include "acl_fullyconnected_utils.hpp" +#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h" +#include "memory_desc/cpu_memory_desc_utils.h" +#include "nodes/common/cpu_convert.h" +#include "nodes/executors/acl/acl_utils.hpp" +#include "nodes/executors/common/common_utils.hpp" +#include "nodes/executors/debug_messages.hpp" +#include "nodes/executors/executor.hpp" +#include "nodes/executors/implementation_utils.hpp" +#include "nodes/executors/memory_arguments.hpp" +#include "utils/debug_capabilities.h" + +namespace ov { +namespace intel_cpu { + +static bool checkPostOps(const PostOps& postOps) { + if (postOps.empty()) { + return true; + } + + if (postOps.size() != 1) { + return false; + } + + const auto activation = std::dynamic_pointer_cast(postOps[0]); + return checkActivationLayerInfo(convertToEltwiseAlgorithm(activation->type())); +} + +static void initFCAttrs(const FCAttrs& attrs, + ACLTensorAttrs& aclTensorAttrs, + ACLFCAttrs& aclfcAttrs, + const MemoryArgs& memory, + arm_compute::GEMMInfo& fullyConnectedLayerInfo, + const PostOps& postOps) { + aclTensorAttrs.hasLayoutTypeNHWC = memory.at(ARG_SRC)->getDescPtr()->hasLayoutType(LayoutType::nspc); + aclfcAttrs.inputPrecision = memory.at(ARG_SRC)->getDescPtr()->getPrecision(); + aclfcAttrs.weightsNonTransposed = attrs.weightsNonTransposed; + + if (!postOps.empty()) { + auto 
activation = std::dynamic_pointer_cast(postOps[0]); + fullyConnectedLayerInfo.set_activation_info( + getActivationLayerInfo(convertToEltwiseAlgorithm(activation->type()), + activation->alpha(), + activation->beta(), + activation->gamma())); + } + + if (memory.at(ARG_SRC)->getPrecision() != memory.at(ARG_WEI)->getPrecision()) { + aclfcAttrs.isConvertedWeights = true; + } +} + +ACLLowpFullyConnectedExecutor::ACLLowpFullyConnectedExecutor(const FCAttrs& attrs, + const PostOps& postOps, + const MemoryArgs& memory, + const ExecutorContext::CPtr& context) { + dequantizationScales = getDeQuantizedScales(memory); + initFCAttrs(attrs, aclTensorAttrs, aclfcAttrs, memory, gemmInfo, postOps); + packedWeights = acl_fc_executor::prepareWeightMemory(memory, + context, + attrs, + aclfcAttrs, + postOps, + expectedWeightFormat, + weiTensorInfo); +} + +bool ACLLowpFullyConnectedExecutor::supports(const FCConfig& config) { + const auto src0 = srcType(config); + const auto src1 = weiType(config); + const auto dst = dstType(config); + if ((src0 != ov::element::i8) || (src1 != ov::element::i8) || (dst != ov::element::f32)) { + return false; + } + + VERIFY(checkPostOps(config.postOps), UNSUPPORTED_TYPE_OF_POSTOPS); + VERIFY(one_of(srcRank(config), 2U, 3U, 4U), UNSUPPORTED_SRC_RANK); + VERIFY(one_of(weiRank(config), 2U, 3U, 4U), UNSUPPORTED_WEI_RANK); + return true; +} + +void ACLLowpFullyConnectedExecutor::updateTensorsShapes(ACLShapes& aclMemoryShapes) { + acl_fc_executor::updateFCTensorsShapes(aclMemoryShapes); +} + +arm_compute::Status ACLLowpFullyConnectedExecutor::validateTensorsInfo(const ACLInfos& aclMemoryInfos) { + auto& tensor_info = aclMemoryInfos[ACLArgs::ACL_SRC_0]; + if (dequantizationScales.empty()) { + tensor_info->set_quantization_info(arm_compute::QuantizationInfo(1.f)); + } else { + tensor_info->set_quantization_info(arm_compute::QuantizationInfo(dequantizationScales[0])); + } + + auto& tensor_info_weights = aclMemoryInfos[ACLArgs::ACL_WEI]; + 
tensor_info_weights->set_quantization_info(arm_compute::QuantizationInfo(1.f)); + + const auto matMulValid = + arm_compute::NEGEMMLowpMatrixMultiplyCore::validate(aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), + aclMemoryInfos[ACLArgs::ACL_WEI].get(), + aclMemoryInfos[ACLArgs::ACL_BIAS].get(), + aclMemoryInfos[ACLArgs::ACL_DST].get(), + gemmInfo); + return matMulValid; +} + +ACLFunction ACLLowpFullyConnectedExecutor::configureFunction(const ACLTensors& aclMemoryTensors) { + auto gemm = std::make_unique(); + gemm->configure(aclMemoryTensors[ACLArgs::ACL_SRC_0].get(), + aclMemoryTensors[ACLArgs::ACL_WEI].get(), + aclMemoryTensors[ACLArgs::ACL_BIAS].get(), + aclMemoryTensors.at(ACLArgs::ACL_DST).get(), + gemmInfo); + + if (aclfcAttrs.isConvertedWeights || !aclfcAttrs.weightsNonTransposed) { + aclTensorAttrs.memoryUsageIndicator[ACLArgs::ACL_WEI] = false; + aclMemoryTensors[ACLArgs::ACL_WEI]->allocator()->import_memory(packedWeights->getData()); + } + return gemm; +} + +std::shared_ptr ACLLowpFullyConnectedExecutor::initTensorInfo( + const arm_compute::TensorShape& tensorShape, + const arm_compute::DataType& dataType, + const arm_compute::DataLayout& dataLayout) { + arm_compute::DataType result; + switch (dataType) { + case arm_compute::DataType::S8: { + result = arm_compute::DataType::QASYMM8_SIGNED; + break; + } + case arm_compute::DataType::U8: { + result = arm_compute::DataType::QASYMM8; + break; + } + default: { + result = dataType; + break; + } + } + + return ACLCommonExecutor::initTensorInfo(tensorShape, result, dataLayout); +} + +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.hpp new file mode 100644 index 00000000000000..3912328077df63 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.hpp @@ -0,0 +1,51 @@ +// Copyright (C) 2024 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "acl_common_executor.hpp" +#include "acl_fullyconnected_utils.hpp" +#include "nodes/executors/fullyconnected_config.hpp" + +namespace ov { +namespace intel_cpu { + +class ACLLowpFullyConnectedExecutor : public ACLCommonExecutor { +public: + ACLLowpFullyConnectedExecutor(const FCAttrs& attrs, + const PostOps& postOps, + const MemoryArgs& memory, + const ExecutorContext::CPtr& context); + + static bool supports(const FCConfig& config); + + void updateTensorsShapes(ACLShapes& aclMemoryShapes) override; + + arm_compute::Status validateTensorsInfo(const ACLInfos& aclMemoryInfos) override; + + ACLFunction configureFunction(const ACLTensors& aclMemoryTensors) override; + + impl_desc_type implType() const override { + return impl_desc_type::gemm_acl; + } + +protected: + std::shared_ptr initTensorInfo(const arm_compute::TensorShape& tensorShape, + const arm_compute::DataType& dataType, + const arm_compute::DataLayout& dataLayout) override; + +private: + arm_compute::GEMMInfo gemmInfo; + arm_compute::WeightFormat expectedWeightFormat; + arm_compute::TensorInfo weiTensorInfo; + + MemoryCPtr packedWeights; + ACLFCAttrs aclfcAttrs; + std::vector dequantizationScales; +}; + +using ACLLowpFullyConnectedExecutorPtr = std::shared_ptr; + +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/common/common_utils.hpp b/src/plugins/intel_cpu/src/nodes/executors/common/common_utils.hpp new file mode 100644 index 00000000000000..614caead1a39b1 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/common/common_utils.hpp @@ -0,0 +1,66 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +// @file common_utils.hpp +// Contains utility methods used by all executors +// + +#pragma once + +#include + +#include "nodes/executors/memory_arguments.hpp" +#include "utils/cpp/maybe_unused.hpp" +#include "utils/cpu_utils.hpp" + +namespace ov { 
+namespace intel_cpu { + +OV_CPU_MAYBE_UNUSED_FUNCTION static std::vector getDeQuantizedScales(const MemoryArgs& memory) { + if (!memory.count(ARG_DST_DEQ_SCALE)) + return {}; + + auto scalesMemory = memory.at(ARG_DST_DEQ_SCALE); + + auto scalesData = static_cast(scalesMemory->getData()); + + if (!scalesData) + return {}; + + auto dstShape = memory.at(ARG_DST)->getShape(); + auto dqScalesShape = scalesMemory->getShape(); + + auto scalesDims = getNormalizedDimsBySize(dqScalesShape.getDims(), dstShape.getDims().size()); + + auto scaleSize = std::accumulate(scalesDims.begin(), scalesDims.end(), std::size_t(1), std::multiplies()); + + std::vector DQScales(scaleSize, 1.0); + + OPENVINO_ASSERT(scaleSize == 1 || DQScales.size() == 1 || DQScales.size() == scaleSize, + "set invalid scales size , DQScales vector size: ", + DQScales.size(), + ", scale data size: ", + scaleSize); + + // @todo do we really need to broadcast dq scales and then resize them back? + if (scaleSize > DQScales.size()) + DQScales.resize(scaleSize, DQScales[0]); + if (1 == scaleSize) { + std::transform(DQScales.begin(), DQScales.end(), DQScales.begin(), [=](float val) { + return (scalesData[0] * val); + }); + } else { + for (size_t i = 0; i < DQScales.size(); i++) { + DQScales[i] *= scalesData[i]; + } + } + if (std::all_of(DQScales.begin(), DQScales.end(), [&](float val) { + return (val == DQScales[0]); + })) + DQScales.resize(1); + + return DQScales; +} + +} // namespace intel_cpu +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/nodes/executors/debug_messages.hpp b/src/plugins/intel_cpu/src/nodes/executors/debug_messages.hpp index 222779a00ee18f..cba22a25c751d6 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/debug_messages.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/debug_messages.hpp @@ -4,20 +4,21 @@ #pragma once -#define UNSUPPORTED_SPARSE_WEIGHTS " sparse weights are not supported" -#define UNSUPPORTED_WEIGHTS_DECOMPRESSION " weights decompression 
is not supported" -#define UNSUPPORTED_POST_OPS " post ops are not supported" -#define UNSUPPORTED_NUMBER_OF_POSTOPS " the number of post ops is not supported" -#define UNSUPPORTED_TYPE_OF_POSTOPS " the type of post ops is not supported" -#define UNSUPPORTED_SRC_PRECISIONS " unsupported src precisions" -#define UNSUPPORTED_WEI_PRECISIONS " unsupported wei precisions" -#define UNSUPPORTED_DST_PRECISIONS " unsupported dst precisions" -#define UNSUPPORTED_ISA " unsupported isa" -#define UNSUPPORTED_SRC_RANK " unsupported src rank" -#define UNSUPPORTED_WEI_RANK " unsupported wei rank" -#define UNSUPPORTED_DST_RANK " unsupported dst rank" -#define UNSUPPORTED_DST_STRIDES " unsupported dst strides" -#define HEURISTICS_MISMATCH " heuristics mismatch" +#define UNSUPPORTED_SPARSE_WEIGHTS " sparse weights are not supported" +#define UNSUPPORTED_WEIGHTS_DECOMPRESSION " weights decompression is not supported" +#define UNSUPPORTED_POST_OPS " post ops are not supported" +#define UNSUPPORTED_NUMBER_OF_POSTOPS " the number of post ops is not supported" +#define UNSUPPORTED_TYPE_OF_POSTOPS " the type of post ops is not supported" +#define UNSUPPORTED_SRC_PRECISIONS " unsupported src precisions" +#define UNSUPPORTED_WEI_PRECISIONS " unsupported wei precisions" +#define UNSUPPORTED_DST_PRECISIONS " unsupported dst precisions" +#define UNSUPPORTED_ISA " unsupported isa" +#define UNSUPPORTED_SRC_RANK " unsupported src rank" +#define UNSUPPORTED_WEI_RANK " unsupported wei rank" +#define UNSUPPORTED_DST_RANK " unsupported dst rank" +#define UNSUPPORTED_DST_STRIDES " unsupported dst strides" +#define HEURISTICS_MISMATCH " heuristics mismatch" +#define UNSUPPORTED_PER_CHANNEL_QUANTIZATION " unsupported per-channel quantization" #define VERIFY(condition, ...) 
\ do { \ diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index 42101ce3fca257..bc55af8cfbb0e2 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -9,6 +9,7 @@ #include "debug_messages.hpp" #include "implementation_utils.hpp" #include "memory_desc/cpu_memory_desc.h" +#include "nodes/executors/common/common_utils.hpp" #include "nodes/executors/convolution_config.hpp" #include "nodes/executors/dnnl/dnnl_convolution_primitive.hpp" #include "nodes/executors/dnnl/dnnl_fullyconnected.hpp" @@ -31,6 +32,7 @@ #if defined(OV_CPU_WITH_ACL) # include "nodes/executors/acl/acl_fullyconnected.hpp" +# include "nodes/executors/acl/acl_lowp_fullyconnected.hpp" #endif #if defined(OV_CPU_WITH_SHL) @@ -89,6 +91,11 @@ static const TypeMapping aclFCTypeMapping { {{_any, _any, _any, _any}, pt(just(), just(), just(), just())} }; +static const TypeMapping aclLowpFCTypeMapping { + // {src, wei, bia, dst} pt + {{_i8, _i8, _any, _f32}, pt(bypass(), bypass(), use<3>(), bypass())} +}; + static const MappingNotation dnnlConvolutionMappingNotation { ARG_SRC, ARG_WEI, ARG_BIAS, ARG_DST }; @@ -374,6 +381,38 @@ const std::vector>& getImplementations() { const ExecutorContext::CPtr context) { return std::make_shared(attrs, postOps, memory, context); }) + OV_CPU_INSTANCE_ACL( + "fullyconnected_acl_lowp", + ExecutorType::Acl, + OperationType::FullyConnected, + ShapeTolerance::Agnostic, + // supports + [](const FCConfig& config) -> bool { + VERIFY(noSparseDecompression(config), UNSUPPORTED_SPARSE_WEIGHTS); + VERIFY(noWeightsDecompression(config), UNSUPPORTED_WEIGHTS_DECOMPRESSION); + return ACLLowpFullyConnectedExecutor::supports(config); + }, + // requiresFallback + [](const FCConfig& config) -> ov::optional> { + return requiresFallbackCommon(config, + 
aclLowpFCTypeMapping, + aclFCLayoutConfig, + aclFullyConnectedMappingNotation); + }, + // acceptsShapes + [](const MemoryArgs& memory) -> bool { + const auto dequantizationScales = getDeQuantizedScales(memory); + bool isPerChannelQuantization = dequantizationScales.size() > 1; + // per-channel quantization is not supported by ACL + return !isPerChannelQuantization; + }, + // create + [](const FCAttrs& attrs, + const PostOps& postOps, + const MemoryArgs& memory, + const ExecutorContext::CPtr context) { + return std::make_shared(attrs, postOps, memory, context); + }) OV_CPU_INSTANCE_SHL( "fullyconnected_shl", ExecutorType::Shl, diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index 3092356e1189b6..40a4fc4a1739c4 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -59,6 +59,7 @@ if(NOT (ARM OR AARCH64)) ${CMAKE_CURRENT_SOURCE_DIR}/custom/single_layer_tests/instances/arm ${CMAKE_CURRENT_SOURCE_DIR}/custom/subgraph_tests/src/arm ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/snippets/arm + ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/low_precision_transformations/aarch64 ${CMAKE_CURRENT_SOURCE_DIR}/utils/arm) else() # temporary disable all custom tests for ARM @@ -81,7 +82,8 @@ endif() if(NOT X86_64) list(APPEND EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/custom/single_layer_tests/instances/x64 - ${CMAKE_CURRENT_SOURCE_DIR}/custom/subgraph_tests/src/x64) + ${CMAKE_CURRENT_SOURCE_DIR}/custom/subgraph_tests/src/x64 + ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/low_precision_transformations/x64) endif() ov_add_test_target( diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/aarch64/fully_connected_transformation.cpp new file mode 100644 index 00000000000000..d5ca57a1d94910 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/aarch64/fully_connected_transformation.cpp @@ -0,0 +1,113 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "low_precision_transformations/fully_connected_transformation.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { +const std::vector netPrecisions = { + ov::element::f32 +}; + +const std::vector shapes = { + { + ov::PartialShape{ 1, 16 }, + ov::PartialShape{ 16, 8 }, + false, + false + }, + { + ov::PartialShape{ 1, 1, 16 }, + ov::PartialShape{ 1, 16, 8 }, + false, + false + }, + { + ov::PartialShape{ 1, 16 }, + ov::PartialShape{ 8, 16 }, + false, + true + }, + { + ov::PartialShape{ 1, 1, 16 }, + ov::PartialShape{ 1, 8, 16 }, + false, + true + }, + { + ov::PartialShape{ 16, 1 }, + ov::PartialShape{ 16, 8 }, + true, + false + }, + { + ov::PartialShape{ 1, 16, 1 }, + ov::PartialShape{ 1, 16, 8 }, + true, + false + }, + { + ov::PartialShape{ 16, 1 }, + ov::PartialShape{ 8, 16 }, + true, + true + }, + { + ov::PartialShape{ 1, 16, 1 }, + ov::PartialShape{ 1, 8, 16 }, + true, + true + } +}; + +const std::vector trasformationParamValues = { + LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams() +}; + +const std::vector activations = { + { + true, // activation + false, // per-channel + true, // FQ + false, // bias + "fullyConnected,relu_original" + }, + { + false, // activation + false, // per-channel + true, // FQ + false, // bias + "fullyconnected_original" + }, + { + true, // activation + true, // per-channel + false, // FQ + false, // bias + "fullyConnected,relu_original" // dequantization is not supported for per-channel 
quantization + }, + { + true, // activation + false, // per-channel + true, // FQ + true, // bias + "fullyConnected" + }, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_LPT, FullyConnectedTransformation, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(shapes), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(trasformationParamValues), + ::testing::ValuesIn({ov::element::i8 /*, ov::element::u8*/}), + ::testing::ValuesIn(activations), + ::testing::Values("gemm_acl_i8")), + FullyConnectedTransformation::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/add_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/add_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/assign_and_read_value_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/assign_and_read_value_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/batch_to_space_transformation.cpp similarity 
index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/batch_to_space_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/clamp_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/clamp_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_child_and_output.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_child_and_output.cpp diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_different_precision_on_children.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_different_precision_on_children.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_intermediate_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_intermediate_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_neighbors_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_neighbors_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_split_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/concat_with_split_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/convolution_backprop_data_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/convolution_backprop_data_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/convolution_qdq_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/convolution_qdq_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/convolution_transformation.cpp similarity index 100% rename from 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/convolution_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/depth_to_space_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/depth_to_space_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/elementwise_branch_selection_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/elementwise_branch_selection_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/eliminate_fake_quantize_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp rename to 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/eliminate_fake_quantize_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_and_avg_pool_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_and_avg_pool_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_and_max_pool_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_and_max_pool_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_and_two_output_branches_with_convolution.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_and_two_output_branches_with_convolution.cpp diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_precision_selection_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_precision_selection_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_with_dq_not_optimal_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fq_with_dq_not_optimal_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fully_connected_transformation.cpp 
similarity index 55% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fully_connected_transformation.cpp index 0368215a5cf5a4..6d4aefe048188e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fully_connected_transformation.cpp @@ -14,7 +14,7 @@ const std::vector netPrecisions = { ov::element::f32 }; -const std::vector shapes = { +const std::vector shapes = { { ov::PartialShape{ 1, 16 }, ov::PartialShape{ 16, 8 }, @@ -39,11 +39,45 @@ const std::vector trasform LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams() }; +const std::vector activations = { + { + true, // activation + false, // per-channel + true, // FQ + false, // bias + "fullyconnected,relu_original,relu" + }, + { + false, // activation + false, // per-channel + true, // FQ + false, // bias + "fullyconnected" + }, + { + true, // activation + true, // per-channel + false, // FQ + false, // bias + "fullyconnected,relu_original,relu" + }, + { + true, // activation + false, // per-channel + true, // FQ + true, // bias + "fullyconnected/dequantizationmultiply,add,relu" + }, +}; + INSTANTIATE_TEST_SUITE_P(smoke_LPT, FullyConnectedTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(shapes), ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(trasformationParamValues)), + ::testing::ValuesIn(trasformationParamValues), + ::testing::ValuesIn({ov::element::i8/*, ov::element::u8*/}), + ::testing::ValuesIn(activations), + ::testing::Values("")), FullyConnectedTransformation::getTestCaseName); } // namespace diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_convert_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_convert_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_dequantize_to_fq_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_dequantize_to_fq_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_fq_and_scale_shift_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_fq_and_scale_shift_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_multiply_to_fq_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_multiply_to_fq_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_subtract_to_fq_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/fuse_subtract_to_fq_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gemm_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp rename to 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gemm_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/group_convolution_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/group_convolution_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/groupconvolution_qdq_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/groupconvolution_qdq_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/interpolate_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/interpolate_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mat_mul_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mat_mul_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mat_mul_with_constant_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mat_mul_with_constant_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mat_mul_with_optimized_constant_fq.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mat_mul_with_optimized_constant_fq.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/move_fake_quantize_transformation.cpp similarity index 100% rename from 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/move_fake_quantize_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/multiply_to_group_convolution.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/multiply_to_group_convolution.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/multiply_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/multiply_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/multiply_with_one_parent.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/multiply_with_one_parent.cpp diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mvn_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/mvn_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/normalize_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/normalize_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/output_layers.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/output_layers.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/output_layers_concat.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp rename to 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/output_layers_concat.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/output_layers_concat_multi_channel.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/output_layers_concat_multi_channel.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/pad_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/pad_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/prelu_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/prelu_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/pull_reshape_through_dequantization.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/pull_reshape_through_dequantization.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/recurrent_cell_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/recurrent_cell_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_max_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_max_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_mean_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp 
rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_mean_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_min_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_min_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_sum_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reduce_sum_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/relu_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/relu_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reshape_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/reshape_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/shuffle_channels_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/shuffle_channels_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/space_to_batch_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/space_to_batch_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/split_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp rename to 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/split_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/squeeze_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/squeeze_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/strided_slice_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/strided_slice_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/subtract_multiply_to_multiply_add.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/subtract_multiply_to_multiply_add.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/subtract_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/subtract_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/transpose_after_matmul_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/transpose_after_matmul_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/transpose_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/transpose_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/unsqueeze_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp rename to 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/unsqueeze_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/variadic_split_transformation.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/variadic_split_transformation.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index e9b38fedc0b4e5..4c21c06c491179 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -377,7 +377,8 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i32.*Conversion=i8.*)"); // by calc abs_threshold with expected value retVector.emplace_back(R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.*_eltwise_op_type=Div_.*_model_type=i32_.*)"); - + // int8 / code-generation specific + retVector.emplace_back(R"(smoke_LPT.*)"); retVector.emplace_back(R"(.*smoke_RoPETest.*)"); #endif @@ -479,8 +480,6 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(smoke_TestsDFT_(1|2|3|4)d/DFTLayerTest.Inference.*)"); // Issue 88764, 91647, 108802: accuracy issue retVector.emplace_back(R"(MultipleLSTMCellTest/MultipleLSTMCellTest.CompareWithRefs.*)"); - // int8 / code-generation specific - retVector.emplace_back(R"(smoke_LPT.*)"); // Compressed weights are not supported 
retVector.emplace_back(R"(smoke_MatMulCompressedWeights.*)"); retVector.emplace_back(R"(smoke_MatMulSharedCompressedWeights.*)"); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp index 71978473696a0b..cd3e8bedeaabe2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp @@ -15,7 +15,7 @@ const std::vector netPrecisions = { ov::element::f16 }; -const std::vector shapes = { +const std::vector shapes = { { { 1, 16 }, { 16, 8 }, @@ -40,11 +40,38 @@ const std::vector trasform LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams() }; +const std::vector activations = { + { + true, // activation + false, // per-channel + true, // FQ + false, // bias + "" + }, + { + false, // activation + false, // per-channel + true, // FQ + false, // bias + "" + }, + { + true, // activation + true, // per-channel + false, // FQ + false, // bias + "" + }, +}; + INSTANTIATE_TEST_SUITE_P(smoke_LPT, FullyConnectedTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(shapes), ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(trasformationParamValues)), + ::testing::ValuesIn(trasformationParamValues), + ::testing::ValuesIn({ov::element::i8/*, ov::element::u8*/}), + ::testing::ValuesIn(activations), + ::testing::Values("")), FullyConnectedTransformation::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp index 731ce44224e33b..0e51c710e0ea60 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -class MatMulShapes { +class FullyConnectedShapes { public: ov::PartialShape inputA; ov::PartialShape inputB; @@ -16,11 +16,23 @@ class MatMulShapes { bool transposeB; }; +class FullyConnectedParams { +public: + bool activation; + bool perChannelWeights; + bool fq; + bool bias; + std::string originalLayersNames; +}; + typedef std::tuple< ov::element::Type, - MatMulShapes, + FullyConnectedShapes, std::string, - ov::pass::low_precision::LayerTransformation::Params> FullyConnectedTransformationParams; + ov::pass::low_precision::LayerTransformation::Params, + ov::element::Type, + FullyConnectedParams, + std::string> FullyConnectedTransformationParams; namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp index f72f6d90333613..da4c46c81df005 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp @@ -5,38 +5,52 @@ #include "low_precision_transformations/fully_connected_transformation.hpp" #include +#include #include #include -#include #include "common_test_utils/common_utils.hpp" +#include "openvino/util/common_util.hpp" #include "ov_lpt_models/mat_mul.hpp" namespace LayerTestsDefinitions { std::string 
FullyConnectedTransformation::getTestCaseName(const testing::TestParamInfo& obj) { ov::element::Type precision; - MatMulShapes shapes; + FullyConnectedShapes shapes; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; - std::tie(precision, shapes, targetDevice, params) = obj.param; + ov::element::Type weightsType; + FullyConnectedParams activation; + std::string expectedPrimitiveType; + std::tie(precision, shapes, targetDevice, params, weightsType, activation, expectedPrimitiveType) = obj.param; std::ostringstream result; result << - get_test_case_name_by_params(precision, shapes.inputA, targetDevice, params) << - shapes.inputB << "_" << - shapes.transposeA << "_" << - shapes.transposeB; + get_test_case_name_by_params(precision, shapes.inputA, targetDevice, params) << + shapes.inputB << "_" << + "transposeA=" << shapes.transposeA << "_" << + "transposeB=" << shapes.transposeB << "_" << + weightsType << "_" << + "Activation=" << activation.activation << "_" << + "perChannelWeights=" << activation.perChannelWeights << "_" << + "FQ=" << activation.fq << "_" << + "withBias=" << activation.bias << "_" << + activation.originalLayersNames << "_" << + expectedPrimitiveType; return result.str(); } void FullyConnectedTransformation::SetUp() { ov::element::Type precision; - MatMulShapes shapes; + FullyConnectedShapes shapes; ov::pass::low_precision::LayerTransformation::Params params; - std::tie(precision, shapes, targetDevice, params) = this->GetParam(); + ov::element::Type weightsType; + FullyConnectedParams activation; + std::string expectedPrimitiveType; + std::tie(precision, shapes, targetDevice, params, weightsType, activation, expectedPrimitiveType) = this->GetParam(); init_input_shapes({ shapes.inputA, shapes.inputB }); @@ -45,12 +59,33 @@ void FullyConnectedTransformation::SetUp() { shapes.inputA, shapes.inputB, shapes.transposeA, - shapes.transposeB); + shapes.transposeB, + weightsType == ov::element::i8, + activation.bias, + 
activation.perChannelWeights, + activation.activation, + activation.fq); } TEST_P(FullyConnectedTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); run(); + + const auto& activation = std::get<5>(GetParam()); + if (!activation.originalLayersNames.empty()) { + const auto originalLayersNames = get_property_by_type("FullyConnected", "originalLayersNames"); + EXPECT_EQ(ov::util::to_lower(activation.originalLayersNames), originalLayersNames); + } + + const auto& actualPrecision = get_runtime_precision_by_type("FullyConnected"); + const auto expectedPrecision = std::get<4>(GetParam()); + EXPECT_EQ(actualPrecision, expectedPrecision.to_string()); + + const auto& expectedPrimitiveType = std::get<6>(GetParam()); + if (!expectedPrimitiveType.empty()) { + const std::string actualPrimitiveType = get_property_by_type("FullyConnected", "primitiveType"); + EXPECT_EQ(expectedPrimitiveType, actualPrimitiveType); + } }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp index 10a70f3bc04ee0..b9da9ff8af4833 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp @@ -49,6 +49,8 @@ class LayerTransformation : virtual public ov::test::SubgraphBaseTest { // get runtime precision by operation type std::string get_runtime_precision_by_type(const std::string& layerType); + std::string get_property_by_type(const std::string& layerTypeName, const std::string& propertyName); + // get runtime precision by operation friendly name which can be fused std::string get_runtime_precision_by_fused_name(const 
std::string& layerName); diff --git a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp index 49e7b0581cae76..a1781c5826e590 100644 --- a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp +++ b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/util/common_util.hpp" namespace LayerTestsUtils { ov::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParamsU8I8AndI8() { @@ -60,15 +61,15 @@ std::string LayerTransformation::get_test_case_name_by_params( namespace { template -std::string find_node_by_runtime_precision(const ov::CompiledModel& execNet, IsNodeF is_node_f) { +std::string find_node_by_runtime_property(const ov::CompiledModel& execNet, IsNodeF is_node_f, const std::string& propertyName = "runtimePrecision") { const std::shared_ptr& execFunction = execNet.get_runtime_model(); for (const auto& op : execFunction->get_ops()) { if (!is_node_f(op)) continue; const ov::RTMap& rtInfo = op->get_rt_info(); - const auto& it = rtInfo.find("runtimePrecision"); - OPENVINO_ASSERT(it != rtInfo.end(), "Runtime precision is not found for node: ", op->get_friendly_name()); + const auto& it = rtInfo.find(propertyName); + OPENVINO_ASSERT(it != rtInfo.end(), "Runtime property \"", propertyName, "\" is not found for node: ", op->get_friendly_name()); return it->second.as(); } @@ -80,7 +81,7 @@ std::string LayerTransformation::get_runtime_precision(const std::string& layerN auto is_node_f = [layerName](const std::shared_ptr& op) { return op->get_friendly_name() == layerName; }; - return find_node_by_runtime_precision(compiledModel, is_node_f); + return find_node_by_runtime_property(compiledModel, is_node_f); } 
std::string LayerTransformation::get_runtime_precision_by_type(const std::string& layerType) { @@ -91,7 +92,18 @@ std::string LayerTransformation::get_runtime_precision_by_type(const std::string OPENVINO_ASSERT(typeIt != rtInfo.end(), "Layer is not found for type: ", layerType); return typeIt->second.as() == layerType; }; - return find_node_by_runtime_precision(compiledModel, is_node_f); + return find_node_by_runtime_property(compiledModel, is_node_f); +} + +std::string LayerTransformation::get_property_by_type(const std::string& layerTypeName, const std::string& propertyName) { + auto is_node_f = [&layerTypeName](const std::shared_ptr& op) { + const auto& rtInfo = op->get_rt_info(); + const auto& typeIt = rtInfo.find("layerType"); + + OPENVINO_ASSERT(typeIt != rtInfo.end(), "Layer is not found for type: ", layerTypeName); + return typeIt->second.as() == layerTypeName; + }; + return ov::util::to_lower(find_node_by_runtime_property(compiledModel, is_node_f, propertyName)); } namespace { @@ -116,7 +128,7 @@ std::string LayerTransformation::get_runtime_precision_by_fused_name(const std:: OPENVINO_ASSERT(nameIt != rtInfo.end(), "originalLayersNames is not found for node: ", layerName); return has_layer(nameIt->second.as(), layerName); }; - return find_node_by_runtime_precision(compiledModel, is_node_f); + return find_node_by_runtime_property(compiledModel, is_node_f); } bool LayerTransformation::check_execution_order(const std::vector& orderedOpsTypes) { diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp index 787e1f6ebe8bd4..f9bd7f774287ec 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp @@ -24,17 +24,23 @@ class MatMulFunction { static std::shared_ptr getOriginal( const ov::element::Type precision, - const ov::PartialShape inputShape1, - const ov::PartialShape 
inputShape2, + const ov::PartialShape& inputShape1, + const ov::PartialShape& inputShape2, const bool transpose1, - const bool transpose2); + const bool transpose2, + const bool signedWeights, + const bool bias, + const bool perChannelWeightsDequantization, + const bool relu, + const bool fq); static std::shared_ptr getOriginal( const ov::element::Type precision, const ov::Shape& inputShape1, const FakeQuantizeOnData& fqOnData1, const ov::Shape& inputShape2, - const FakeQuantizeOnData& fqOnData2); + const FakeQuantizeOnData& fqOnData2, + const bool requantization = false); static std::shared_ptr getOriginal(const ov::element::Type netPrecision, const ov::PartialShape& inputShape1, diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp index 1b1351ef1b3399..be502d5c3050cc 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp @@ -11,6 +11,7 @@ #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" #include "ov_lpt_models/common/builders.hpp" +#include "common_test_utils/node_builders/constant.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" namespace ov { @@ -49,36 +50,96 @@ std::shared_ptr MatMulFunction::getOriginal( return function; } +namespace { +std::vector generate_dequantization_values( + const ov::Shape& shape, + const size_t levels, + const bool low) { + const auto shape_size = ov::shape_size(shape); + std::vector values(shape_size); + for (size_t i = 0; i < shape_size; ++i) { + values[i] = low ? 
-128.f / (static_cast(i) + 1.f) : 127.f / (static_cast(i) + 1.f); + } + return values; +} +} // namespace + std::shared_ptr MatMulFunction::getOriginal( - const ov::element::Type precision, - const ov::PartialShape inputShape1, - const ov::PartialShape inputShape2, - const bool transpose1, - const bool transpose2) { + const ov::element::Type precision, + const ov::PartialShape& inputShape1, + const ov::PartialShape& inputShape2, + const bool transpose1, + const bool transpose2, + const bool signedOnWeights, + const bool bias, + const bool perChannelWeightsDequantization, + const bool relu, + const bool fq) { const auto paramNode = std::make_shared(precision, inputShape1); const std::vector constShapes(inputShape1.rank().get_length(), 1ul); - const auto fakeQuantizeOnAcitvations = ov::test::utils::make_fake_quantize( - paramNode, precision, 256ul, constShapes, - { 0.f }, { 255.f / 4.f }, { 0.f }, { 255.f / 4.f }); + const auto fakeQuantizeOnAcitvations = signedOnWeights ? + ov::test::utils::make_fake_quantize( + paramNode, precision, 256ul, constShapes, + { -128.f / 4.f }, { 127.f / 4.f }, { -128.f / 4.f }, { 127.f / 4.f }) : + ov::test::utils::make_fake_quantize( + paramNode, precision, 256ul, constShapes, + { 0.f }, { 255.f / 4.f }, { 0.f }, { 255.f / 4.f }); fakeQuantizeOnAcitvations->set_friendly_name("fakeQuantizeOnAcitvations"); - auto weightsConst = std::make_shared( - precision, - inputShape2.to_shape(), - std::vector({ 1.f })); - const auto fakeQuantizeOnWeights = ov::test::utils::make_fake_quantize( - weightsConst, precision, 256ul, { 1ul, 1ul }, - { -128.f / 8.f }, { 127.f / 8.f }, { -128.f / 8.f }, { 127.f / 8.f }); - fakeQuantizeOnWeights->set_friendly_name("fakeQuantizeOnWeights"); + const size_t channel = inputShape2[inputShape2.size() - 2].get_length(); + + // fq + std::shared_ptr parentOnWeights; + if (fq) { + auto weightsConst = ov::test::utils::make_constant(precision, inputShape2.to_shape()); + parentOnWeights = perChannelWeightsDequantization ? 
+ ov::test::utils::make_fake_quantize( + weightsConst, precision, 256ul, + Shape{channel, 1}, + generate_dequantization_values(Shape{channel, 1}, 256ul, true), + generate_dequantization_values(Shape{channel, 1}, 256ul, false), + generate_dequantization_values(Shape{channel, 1}, 256ul, true), + generate_dequantization_values(Shape{channel, 1}, 256ul, false)) : + ov::test::utils::make_fake_quantize( + weightsConst, precision, 256ul, {1ul, 1ul}, + {-128.f / 8.f}, {127.f / 8.f}, {-128.f / 8.f}, {127.f / 8.f}); + } else { + Shape shape = inputShape2.to_shape(); + if (transpose2) { + shape[shape.size() - 1ull] = 1; + } else { + shape[shape.size() - 2ull] = 1; + } + auto weightsConst = ov::test::utils::make_constant(signedOnWeights ? element::i8 : element::u8, inputShape2.to_shape(), {}); + const auto convert = std::make_shared(weightsConst, precision); + + const auto multiplyConst = ov::test::utils::make_constant(precision, shape); + parentOnWeights = std::make_shared(convert, multiplyConst); + } - const std::shared_ptr fullyConnected = std::make_shared( + parentOnWeights->set_friendly_name("fakeQuantizeOnWeights"); + + std::shared_ptr parent = std::make_shared( fakeQuantizeOnAcitvations->output(0), - fakeQuantizeOnWeights->output(0), + parentOnWeights->output(0), transpose1, transpose2); - fullyConnected->set_friendly_name("fullyConnected"); + parent->set_friendly_name("fullyConnected"); + + if (bias) { + ov::Shape bias_shape(parent->get_output_partial_shape(0).size(), 1); + bias_shape.back() = parent->get_output_partial_shape(0).rbegin()->get_length(); + auto bias = ov::test::utils::make_constant(precision, bias_shape); + parent = std::make_shared(parent, bias); + parent->set_friendly_name("add"); + } + + if (relu) { + parent = std::make_shared(parent); + parent->set_friendly_name("relu"); + } - ov::ResultVector results{ std::make_shared(fullyConnected) }; + ov::ResultVector results{ std::make_shared(parent) }; std::shared_ptr function = std::make_shared( results, 
ov::ParameterVector{ paramNode }, @@ -93,21 +154,40 @@ std::shared_ptr MatMulFunction::getOriginal( const ov::Shape& inputShape1, const FakeQuantizeOnData& fqOnData1, const ov::Shape& inputShape2, - const FakeQuantizeOnData& fqOnData2) { + const FakeQuantizeOnData& fqOnData2, + const bool requantization) { const std::shared_ptr input1 = std::make_shared(precision, inputShape1); input1->set_friendly_name("input1"); const std::shared_ptr input2 = std::make_shared(precision, inputShape2); input2->set_friendly_name("input2"); - const std::shared_ptr matMul = std::make_shared( - makeFakeQuantize(input1, precision, fqOnData1), - makeFakeQuantize(input2, precision, fqOnData2), + std::shared_ptr parent1 = input1; + if (!fqOnData1.empty()) { + parent1 = makeFakeQuantize(parent1, precision, fqOnData1); + } + + std::shared_ptr parent2 = input2; + if (!fqOnData2.empty()) { + parent2 = makeFakeQuantize(parent2, precision, fqOnData2); + } + + std::shared_ptr parent = std::make_shared( + parent1, + parent2, false, false); - matMul->set_friendly_name("matMul"); + parent->set_friendly_name("matMul"); + + if (requantization) { + parent = makeFakeQuantize(parent, precision, fqOnData1); + parent = std::make_shared( + parent, + std::make_shared(ov::element::f32, Shape{1}, std::vector{0.f})); + parent->set_friendly_name("prelu"); + } - std::shared_ptr result = std::make_shared(matMul); + std::shared_ptr result = std::make_shared(parent); std::shared_ptr function = std::make_shared( ov::ResultVector{ result }, From f03770bd3bb9725120739c788d7cbfd50688c3d3 Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Wed, 18 Dec 2024 15:25:22 +0100 Subject: [PATCH 09/60] [DOCS] Benchmark data update for 24.6 v2 (#28127) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- .../benchmarks_files/data/graph-data-ov.json | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json 
b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json index 59eb81d2462213..c5cfca9df3f095 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json +++ b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json @@ -838,6 +838,76 @@ "UnitDesc": "lower is better" } } + }, + { + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", + "Model": "llama-2-7b-chat", + "featured_SKU": true, + "whats_new_model": false, + "PlatformType": "Intel® Core™, NPU-only", + "Parameters": { + "throughput": { + "Precisions": [ + { + "int4": "", + "int8": "", + "fp16": 3.12, + "fp32": "", + "bf16": "" + } + ], + "Unit": "Tokens per sec", + "UnitDesc": "higher is better" + }, + "latency": { + "Precisions": [ + { + "int4": "", + "int8": "", + "fp16": 320.24, + "fp32": "", + "bf16": "" + } + ], + "Unit": "ms", + "UnitDesc": "lower is better" + } + } + }, + { + "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", + "Model": "phi-3-mini-4k-instruct", + "featured_SKU": true, + "whats_new_model": true, + "PlatformType": "Intel® Core™, NPU-only", + "Parameters": { + "throughput": { + "Precisions": [ + { + "int4": "", + "int8": 0.5, + "fp16": 4.8, + "fp32": "", + "bf16": "" + } + ], + "Unit": "Tokens per sec", + "UnitDesc": "higher is better" + }, + "latency": { + "Precisions": [ + { + "int4": "", + "int8": 1963.5, + "fp16": 208.07, + "fp32": "", + "bf16": "" + } + ], + "Unit": "ms", + "UnitDesc": "lower is better" + } + } }, { "Platform": "Intel® Core™ Ultra 9 processor 288V CPU+iGPU", From 83376a692320c871115fbcdaa400cb54a7f84017 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Wed, 18 Dec 2024 16:07:35 +0100 Subject: [PATCH 10/60] [DOCS] Removing Legacy Features from documentation (#27730) Removing `Legacy Features` section from docs. 
--------- Signed-off-by: sgolebiewski-intel --- .../additional-resources/glossary.rst | 1 - .../supported-devices.rst | 6 - .../getting-performance-numbers.rst | 4 +- .../performance-benchmarks-faq.rst | 34 +- .../about-openvino/release-notes-openvino.rst | 8 +- .../release-notes-openvino/release-policy.rst | 2 +- .../assets/images/MO_connection_example_1.svg | 3 - .../assets/images/MO_conversion_pipeline.svg | 3 - .../images/MO_graph_after_extractors.svg | 3 - .../assets/images/MO_graph_after_loader.svg | 3 - .../MO_graph_before_partial_inference.svg | 3 - .../assets/images/MO_ports_example_1.svg | 3 - .../assets/images/MO_ports_example_2.svg | 3 - .../images/MO_transformations_graph.svg | 3 - .../assets/images/deploy_encrypted_model.svg | 4 +- .../images/training_extensions_framework.png | 4 +- docs/articles_en/documentation.rst | 1 - .../documentation/legacy-features.rst | 130 --- .../legacy-features/install-dev-tools.rst | 259 ----- .../legacy-features/model-zoo.rst | 31 - .../legacy-features/multi-device.rst | 155 --- .../transition-legacy-conversion-api.rst | 863 ---------------- .../legacy-conversion-api.rst | 188 ---- .../[legacy]-compressing-model-to-fp16.rst | 53 - ...gacy]-convert-models-as-python-objects.rst | 150 --- .../[legacy]-cutting-parts-of-a-model.rst | 585 ----------- ...y]-embedding-preprocessing-computation.rst | 253 ----- .../[legacy]-model-optimizer-faq.rst | 947 ------------------ .../[legacy]-setting-input-shapes.rst | 156 --- .../[legacy]-supported-model-formats.rst | 598 ----------- .../[legacy]-conversion-tutorials.rst | 59 -- .../convert-onnx-faster-r-cnn.rst | 41 - .../convert-onnx-gpt-2.rst | 34 - .../convert-onnx-mask-r-cnn.rst | 41 - .../convert-pytorch-bert-ner.rst | 76 -- .../convert-pytorch-cascade-rcnn-r-101.rst | 51 - .../convert-pytorch-f3-net.rst | 55 - .../convert-pytorch-quartz-net.rst | 61 -- .../convert-pytorch-rcan.rst | 49 - .../convert-pytorch-rnn-t.rst | 137 --- .../convert-pytorch-yolact.rst | 222 ---- 
.../convert-tensorflow-attention-ocr.rst | 60 -- .../convert-tensorflow-bert.rst | 170 ---- .../convert-tensorflow-crnn.rst | 86 -- .../convert-tensorflow-deep-speech.rst | 108 -- .../convert-tensorflow-efficient-det.rst | 90 -- .../convert-tensorflow-face-net.rst | 42 - .../convert-tensorflow-gnmt.rst | 315 ------ .../convert-tensorflow-language-1b.rst | 131 --- .../convert-tensorflow-ncf.rst | 68 -- .../convert-tensorflow-object-detection.rst | 184 ---- .../convert-tensorflow-retina-net.rst | 31 - .../convert-tensorflow-slim-library.rst | 117 --- ...onvert-tensorflow-wide-and-deep-family.rst | 166 --- .../convert-tensorflow-xlnet.rst | 208 ---- .../convert-tensorflow-yolo.rst | 322 ------ .../[legacy]-convert-onnx.rst | 70 -- .../[legacy]-convert-paddle.rst | 139 --- .../[legacy]-convert-pytorch.rst | 111 -- .../[legacy]-convert-tensorflow-lite.rst | 37 - .../[legacy]-convert-tensorflow.rst | 359 ------- ...legacy]-troubleshooting-reshape-errors.rst | 54 - .../legacy-model-optimizer-extensibility.rst | 326 ------ ...del-optimizer-with-caffe-python-layers.rst | 110 -- ...gacy]-graph-traversal-and-modification.rst | 186 ---- .../[legacy]-model-optimizer-extensions.rst | 60 -- ...egacy]-graph-transformation-extensions.rst | 605 ----------- .../[legacy]-model-optimizer-operation.rst | 110 -- .../[legacy]-optimizer-extractor.rst | 113 --- .../documentation/openvino-ecosystem.rst | 9 - .../openvino-security-add-on.rst | 47 +- .../documentation/openvino-extensibility.rst | 24 +- .../custom-gpu-operations.rst | 13 +- .../frontend-extensions.rst | 3 - .../low-precision-transformations.rst | 18 +- .../documentation/openvino-security.rst | 2 +- docs/articles_en/get-started.rst | 4 +- .../get-started/install-openvino.rst | 13 - .../install-openvino-archive-linux.rst | 2 +- .../install-openvino-archive-macos.rst | 2 +- .../install-openvino-archive-windows.rst | 2 +- .../install-openvino-brew.rst | 9 +- .../install-openvino-conda.rst | 1 - .../install-openvino-vcpkg.rst | 8 +- 
.../install-openvino/install-openvino-yum.rst | 8 +- .../install-openvino-zypper.rst | 8 +- .../notebooks-installation.rst | 6 - .../learn-openvino/llm_inference_guide.rst | 2 +- .../llm-inference-native-ov.rst | 192 ---- .../openvino-samples/benchmark-tool.rst | 6 +- .../openvino-samples/bert-benchmark.rst | 5 +- .../openvino-samples/hello-classification.rst | 8 +- .../hello-nv12-input-classification.rst | 8 +- .../openvino-samples/hello-reshape-ssd.rst | 14 +- .../image-classification-async.rst | 6 +- .../openvino-samples/model-creation.rst | 4 +- .../openvino-samples/sync-benchmark.rst | 10 +- .../openvino-samples/throughput-benchmark.rst | 7 +- .../openvino-workflow/model-preparation.rst | 17 - .../model-preparation/convert-model-to-ir.rst | 8 +- .../running-inference/dynamic-shapes.rst | 2 +- .../inference-devices-and-modes.rst | 1 - .../auto-device-selection.rst | 1 - .../gpu-device.rst | 2 +- .../npu-device.rst | 10 +- .../general-optimizations.rst | 4 +- .../layout-api-overview.rst | 1 - .../optimizing-throughput.rst | 13 +- docs/dev/build_mac_arm.md | 8 +- docs/dev/build_mac_intel_cpu.md | 8 +- docs/dev/installing.md | 260 ++--- .../nncf/code/pruning_tf.py | 10 +- .../nncf/code/pruning_torch.py | 10 +- docs/optimization_guide/nncf/code/qat_tf.py | 10 +- .../optimization_guide/nncf/code/qat_torch.py | 2 +- samples/cpp/benchmark_app/README.md | 2 +- src/bindings/python/docs/build.md | 4 +- tools/benchmark_tool/README.md | 2 +- 118 files changed, 191 insertions(+), 10285 deletions(-) delete mode 100644 docs/articles_en/assets/images/MO_connection_example_1.svg delete mode 100644 docs/articles_en/assets/images/MO_conversion_pipeline.svg delete mode 100644 docs/articles_en/assets/images/MO_graph_after_extractors.svg delete mode 100644 docs/articles_en/assets/images/MO_graph_after_loader.svg delete mode 100644 docs/articles_en/assets/images/MO_graph_before_partial_inference.svg delete mode 100644 docs/articles_en/assets/images/MO_ports_example_1.svg delete mode 
100644 docs/articles_en/assets/images/MO_ports_example_2.svg delete mode 100644 docs/articles_en/assets/images/MO_transformations_graph.svg delete mode 100644 docs/articles_en/documentation/legacy-features.rst delete mode 100644 docs/articles_en/documentation/legacy-features/install-dev-tools.rst delete mode 100644 docs/articles_en/documentation/legacy-features/model-zoo.rst delete mode 100644 docs/articles_en/documentation/legacy-features/multi-device.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-compressing-model-to-fp16.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-convert-models-as-python-objects.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-cutting-parts-of-a-model.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-model-optimizer-faq.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials.rst 
delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-faster-r-cnn.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-gpt-2.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-mask-r-cnn.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-bert-ner.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-cascade-rcnn-r-101.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-f3-net.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-quartz-net.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rcan.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rnn-t.rst delete mode 100644 
docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-yolact.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-attention-ocr.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-bert.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-crnn.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-deep-speech.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-efficient-det.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-face-net.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-gnmt.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-language-1b.rst delete mode 100644 
docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-ncf.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-retina-net.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-slim-library.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-wide-and-deep-family.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-xlnet.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-yolo.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-onnx.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-paddle.rst delete mode 100644 
docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-pytorch.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow-lite.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-troubleshooting-reshape-errors.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-extending-model-optimizer-with-caffe-python-layers.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions.rst delete mode 100644 docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-model-optimizer-operation.rst delete mode 100644 
docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-optimizer-extractor.rst delete mode 100644 docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-native-ov.rst diff --git a/docs/articles_en/about-openvino/additional-resources/glossary.rst b/docs/articles_en/about-openvino/additional-resources/glossary.rst index 9aba2b395525c2..6120b0c9018a54 100644 --- a/docs/articles_en/about-openvino/additional-resources/glossary.rst +++ b/docs/articles_en/about-openvino/additional-resources/glossary.rst @@ -38,7 +38,6 @@ Acronyms and Abbreviations LRN Local Response Normalization mAP Mean Average Precision Intel® OneDNN Intel® OneAPI Deep Neural Network Library - `mo` Command-line tool for model conversion, CLI for ``tools.mo.convert_model`` (legacy) MVN Mean Variance Normalization NCDHW Number of images, Channels, Depth, Height, Width NCHW Number of images, Channels, Height, Width diff --git a/docs/articles_en/about-openvino/compatibility-and-support/supported-devices.rst b/docs/articles_en/about-openvino/compatibility-and-support/supported-devices.rst index 6e0e21335e50c8..3bb46116ee1748 100644 --- a/docs/articles_en/about-openvino/compatibility-and-support/supported-devices.rst +++ b/docs/articles_en/about-openvino/compatibility-and-support/supported-devices.rst @@ -31,11 +31,6 @@ OpenVINO offers the option of running automated inference with the following inf | :doc:`Automatic Batching <../../openvino-workflow/running-inference/inference-devices-and-modes/automatic-batching>`: | automatically groups inference requests to improve device utilization. -| :doc:`(LEGACY) Multi-device Inference <./../../documentation/legacy-features/multi-device>`: -| executes inference on multiple devices. Currently, this mode is considered a legacy - solution. Using Automatic Device Selection instead is advised. 
- - Feature Support and API Coverage ################################# @@ -52,7 +47,6 @@ Feature Support and API Coverage :doc:`Preprocessing acceleration <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing>` Yes Yes No :doc:`Stateful models <../../openvino-workflow/running-inference/stateful-models>` Yes Yes Yes :doc:`Extensibility <../../documentation/openvino-extensibility>` Yes Yes No - :doc:`(LEGACY) Multi-device execution <./../../documentation/legacy-features/multi-device>` Yes Yes Partial ======================================================================================================================================== ======= ========== =========== diff --git a/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst b/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst index 936f1145a6b3b0..9ba82690b00395 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst @@ -103,7 +103,7 @@ General considerations Some image pre-processing can be baked into OpenVINO IR and accelerated accordingly. For more information, refer to - :doc:`Embedding Pre-processing <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation>` + :doc:`Preprocessing API <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/preprocessing-api-details>`. and :doc:`General Runtime Optimizations <../../openvino-workflow/running-inference/optimize-inference/general-optimizations>`. @@ -192,7 +192,7 @@ execution breakdown. For example, the table below is part of performance counters for :doc:`CPU inference <../../openvino-workflow/running-inference/inference-devices-and-modes/cpu-device>`. 
-of a `TensorFlow implementation of ResNet-50 `__ +of a TensorFlow implementation of ResNet-50. Keep in mind that since the device is CPU, the ``realTime`` wall clock and the ``cpu`` time layers are the same. Information about layer precision is also stored in the performance counters. diff --git a/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst b/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst index 0f70c93e9c8b96..5495711bc0054a 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst @@ -15,13 +15,7 @@ Performance Information F.A.Q. .. dropdown:: Where can I find the models used in the performance benchmarks? - All models used are included in the GitHub repository of - :doc:`Open Model Zoo <../../documentation/legacy-features/model-zoo>`. - - .. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. + All models used are published on `Hugging Face `__. .. dropdown:: Will there be any new models added to the list used for benchmarking? @@ -35,7 +29,7 @@ Performance Information F.A.Q. open-source tool within the Intel® Distribution of OpenVINO™ toolkit called :doc:`benchmark_app <../../learn-openvino/openvino-samples/benchmark-tool>`. - For diffusers (Stable-Diffusion) and foundational models (aka LLMs) please use the OpenVINO GenAI + For diffusers (Stable-Diffusion) and foundational models (aka LLMs) please use the OpenVINO GenAI opensource repo `OpenVINO GenAI tools/llm_bench `__ For a simple instruction on testing performance, see the :doc:`Getting Performance Numbers Guide `. @@ -93,30 +87,6 @@ Performance Information F.A.Q. 
- BERT - question / answer - 128 - * - `efficientdet-d0 `__ - - Efficientdet - - classification - - 512x512 - * - `mask_rcnn_resnet50_atrous_coco `__ - - Mask R-CNN ResNet 50 Atrous - - object instance segmentation - - 800x1365 - * - `mobilenet-v2 `__ - - Mobilenet V2 PyTorch - - classification - - 224x224 - * - `resnet-50 `__ - - ResNet-50_v1_ILSVRC-2012 - - classification - - 224x224 - * - `ssd-mobilenet-v1-coco `__ - - ssd-mobilenet-V1-coco onnx model - - object detection - - 300x300 - * - `ssd-resnet34-1200-onnx `__ - - ssd-resnet34 onnx model - - object detection - - 1200x1200 * - `yolov8n `__ - Yolov8nano - object detection diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index e306b5e6f88605..de233e6fa7cc9d 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -1669,7 +1669,7 @@ Deprecation And Support Using deprecated features and components is not advised. They are available to enable a smooth transition to new solutions and will be discontinued in the future. To keep using discontinued features, you will have to revert to the last LTS OpenVINO version supporting them. -For more details, refer to the :doc:`OpenVINO Legacy Features and Components <../documentation/legacy-features>` +For more details, refer to the `OpenVINO Legacy Features and Components __` page. Discontinued in 2024 @@ -1727,7 +1727,7 @@ Deprecated and to be removed in the future * Model Optimizer will be discontinued with OpenVINO 2025.0. Consider using the :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` instead. For more details, see the - :doc:`model conversion transition guide <../documentation/legacy-features/transition-legacy-conversion-api>`. + `model conversion transition guide `__. * OpenVINO property Affinity API will be discontinued with OpenVINO 2025.0. 
It will be replaced with CPU binding configurations (``ov::hint::enable_cpu_pinning``). * OpenVINO Model Server components: @@ -1756,10 +1756,6 @@ Deprecated and to be removed in the future * See alternative: `Machine Translation Python* Demo `__ - * `Open Model Zoo Tools Tutorial `__ - - * No alternatives, demonstrates deprecated tools. - * `Super Resolution with OpenVINO™ `__ * See alternative: `Super Resolution with PaddleGAN and OpenVINO `__ diff --git a/docs/articles_en/about-openvino/release-notes-openvino/release-policy.rst b/docs/articles_en/about-openvino/release-notes-openvino/release-policy.rst index 44ca052ee8e7b9..34107c60b73139 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino/release-policy.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino/release-policy.rst @@ -179,7 +179,7 @@ Additional Information * Binary distribution: * Download from `OpenVINO storage `__ - * `pypi.org `__ + * `pypi.org `__ * `DockerHub* `__ diff --git a/docs/articles_en/assets/images/MO_connection_example_1.svg b/docs/articles_en/assets/images/MO_connection_example_1.svg deleted file mode 100644 index 9e975041032891..00000000000000 --- a/docs/articles_en/assets/images/MO_connection_example_1.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fd1e2d8f82ce07f5d463d6480293935443785979fe16b555cd8e60fb2f253928 -size 55232 diff --git a/docs/articles_en/assets/images/MO_conversion_pipeline.svg b/docs/articles_en/assets/images/MO_conversion_pipeline.svg deleted file mode 100644 index e0448b06dda139..00000000000000 --- a/docs/articles_en/assets/images/MO_conversion_pipeline.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db6f798882e0301f0cf83f1eba90560b5151266612fef2bc5f16a12cf192f0a0 -size 128446 diff --git a/docs/articles_en/assets/images/MO_graph_after_extractors.svg b/docs/articles_en/assets/images/MO_graph_after_extractors.svg deleted file mode 100644 index 
7ee1ebe7c1761a..00000000000000 --- a/docs/articles_en/assets/images/MO_graph_after_extractors.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e9d5ee3d23d232fc10072189c0bf18d76f5d5d7217091d81a1ac465d129c034e -size 88648 diff --git a/docs/articles_en/assets/images/MO_graph_after_loader.svg b/docs/articles_en/assets/images/MO_graph_after_loader.svg deleted file mode 100644 index 380db77679be7f..00000000000000 --- a/docs/articles_en/assets/images/MO_graph_after_loader.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e882e25b5117e4d17a3b94944f58470c0337fafa5afc2ec6aa01f498c442c5f3 -size 73933 diff --git a/docs/articles_en/assets/images/MO_graph_before_partial_inference.svg b/docs/articles_en/assets/images/MO_graph_before_partial_inference.svg deleted file mode 100644 index b312a0314b0b55..00000000000000 --- a/docs/articles_en/assets/images/MO_graph_before_partial_inference.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7799a6c30352fa74d7d98f993d9ad7b148d975d96778762df410d69133abf8a8 -size 158171 diff --git a/docs/articles_en/assets/images/MO_ports_example_1.svg b/docs/articles_en/assets/images/MO_ports_example_1.svg deleted file mode 100644 index 778ee6fd3ecb7a..00000000000000 --- a/docs/articles_en/assets/images/MO_ports_example_1.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8340d5ca434fe74d19f397c1acd0c92b4ad3b16a563975dc1603a6bf8ef03eb6 -size 55262 diff --git a/docs/articles_en/assets/images/MO_ports_example_2.svg b/docs/articles_en/assets/images/MO_ports_example_2.svg deleted file mode 100644 index 288ce970b3664f..00000000000000 --- a/docs/articles_en/assets/images/MO_ports_example_2.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aed3820019aa5b9d4741c146bd4596e6850ea714e6e44fefe6cccf4707e5f152 -size 55270 diff --git 
a/docs/articles_en/assets/images/MO_transformations_graph.svg b/docs/articles_en/assets/images/MO_transformations_graph.svg deleted file mode 100644 index 093365f92a8e8d..00000000000000 --- a/docs/articles_en/assets/images/MO_transformations_graph.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:edbc2911e5aa5a672d8ebaf82b3d06f6915e44b8760ac18f88fba1d2e99fddd6 -size 349693 diff --git a/docs/articles_en/assets/images/deploy_encrypted_model.svg b/docs/articles_en/assets/images/deploy_encrypted_model.svg index 61d0dbe710994e..fa897731b54fef 100644 --- a/docs/articles_en/assets/images/deploy_encrypted_model.svg +++ b/docs/articles_en/assets/images/deploy_encrypted_model.svg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f802b1396fafdc8a80c03c4931d4b6290cc10451961ddba5edcef1c8227833b -size 44097 +oid sha256:454a531a9b2d2883ac9a6beb01ce7ecdd7ec69ea2c68d63b39b65f3780c957fe +size 54772 diff --git a/docs/articles_en/assets/images/training_extensions_framework.png b/docs/articles_en/assets/images/training_extensions_framework.png index 3cbbac7fdbfba8..b518aa584a96fc 100644 --- a/docs/articles_en/assets/images/training_extensions_framework.png +++ b/docs/articles_en/assets/images/training_extensions_framework.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b3932d0cf0071c629e1013f3e17a9f8abda800eb01c50b3e826a42127e42da7 -size 48770 +oid sha256:4c8069733dbd51ff2bd47b47e7d2a7083dac55d9faf66dfb61b897d65eb0a545 +size 47828 diff --git a/docs/articles_en/documentation.rst b/docs/articles_en/documentation.rst index 5be7bb9dbc30fb..c1dd34f5373429 100644 --- a/docs/articles_en/documentation.rst +++ b/docs/articles_en/documentation.rst @@ -13,7 +13,6 @@ Documentation API Reference OpenVINO IR format and Operation Sets - Legacy Features Tool Ecosystem OpenVINO Extensibility OpenVINO™ Security diff --git a/docs/articles_en/documentation/legacy-features.rst 
b/docs/articles_en/documentation/legacy-features.rst deleted file mode 100644 index 2457d28cf24c15..00000000000000 --- a/docs/articles_en/documentation/legacy-features.rst +++ /dev/null @@ -1,130 +0,0 @@ -Legacy Features and Components -============================== - -.. meta:: - :description: A list of deprecated OpenVINO™ components. - -.. toctree:: - :maxdepth: 1 - :hidden: - - OpenVINO Development Tools package - Model Optimizer / Conversion API - Open Model ZOO - legacy-features/multi-device - - -Since OpenVINO has grown very rapidly in recent years, a number of its features -and components have been replaced by other solutions. Some of them are still -supported to assure OpenVINO users are given enough time to adjust their projects, -before the features are fully discontinued. - -This section will give you an overview of these major changes and tell you how -you can proceed to get the best experience and results with the current OpenVINO -offering. - - -| **OpenVINO Development Tools Package** -| *New solution:* OpenVINO Runtime includes all supported components -| *Old solution:* discontinuation planned for OpenVINO 2025.0 -| -| OpenVINO Development Tools used to be the OpenVINO package with tools for - advanced operations on models, such as Model conversion API, Benchmark Tool, - Accuracy Checker, Annotation Converter, Post-Training Optimization Tool, - and Open Model Zoo tools. Most of these tools have been either removed, - replaced by other solutions, or moved to the OpenVINO Runtime package. -| :doc:`See how to install Development Tools ` - - -| **Model Optimizer / Conversion API** -| *New solution:* Direct model support and OpenVINO Converter (OVC) -| *Old solution:* Legacy Conversion API discontinuation planned for OpenVINO 2025.0 -| -| The role of Model Optimizer and later the Conversion API was largely reduced - when all major model frameworks became supported directly. 
For converting model - files explicitly, it has been replaced with a more light-weight and efficient - solution, the OpenVINO Converter (launched with OpenVINO 2023.1). -| :doc:`See how to use OVC <../openvino-workflow/model-preparation>` -| :doc:`See how to transition from the legacy solution ` - - -| **Open Model ZOO** -| *New solution:* users are encouraged to use public model repositories -| *Old solution:* discontinuation planned for OpenVINO 2025.0 -| -| Open Model ZOO provided a collection of models prepared for use with OpenVINO, - and a small set of tools enabling a level of automation for the process. - Since the tools have been mostly replaced by other solutions and several - other model repositories have recently grown in size and popularity, - Open Model ZOO will no longer be maintained. You may still use its resources - until they are fully removed. -| :doc:`See the Open Model ZOO documentation ` -| `Check the OMZ GitHub project `__ -| As for public model databases, `Hugging Face `__ has - become the recommended model source for OpenVINO. - - -| **Multi-Device Execution** -| *New solution:* Automatic Device Selection -| *Old solution:* Legacy Multi-Device Execution discontinuation planned for OpenVINO 2025.0 -| -| The behavior and results of the Multi-Device Execution mode are covered by the ``CUMULATIVE_THROUGHPUT`` - option of the Automatic Device Selection. The only difference is that ``CUMULATIVE_THROUGHPUT`` uses - the devices specified by AUTO, which means that adding devices manually is not mandatory, - while with MULTI, the devices had to be specified before the inference. -| :doc:`Check the Automatic Device Selection <../openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection>` -| :doc:`Check the legacy solution ` - -Discontinued: -############# - -.. 
dropdown:: Caffe, and Kaldi model formats - - | *New solution:* conversion to ONNX via external tools - | *Old solution:* model support discontinued with OpenVINO 2024.0 - | `The last version supporting Apache MXNet, Caffe, and Kaldi model formats `__ - | :doc:`See the currently supported frameworks <../openvino-workflow/model-preparation>` - -.. dropdown:: Post-training Optimization Tool (POT) - - | *New solution:* Neural Network Compression Framework (NNCF) now offers the same functionality - | *Old solution:* POT discontinued with OpenVINO 2024.0 - | :doc:`See how to use NNCF for model optimization <../openvino-workflow/model-optimization>` - | `Check the NNCF GitHub project, including documentation `__ - -.. dropdown:: Inference API 1.0 - - | *New solution:* API 2.0 launched in OpenVINO 2022.1 - | *Old solution:* discontinued with OpenVINO 2024.0 - | `2023.2 is the last version supporting API 1.0 `__ - -.. dropdown:: Compile tool - - | *New solution:* the tool is no longer needed - | *Old solution:* discontinued with OpenVINO 2023.0 - | If you need to compile a model for inference on a specific device, use the following script: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/articles_en/assets/snippets/export_compiled_model.py - :language: python - :fragment: [export_compiled_model] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/articles_en/assets/snippets/export_compiled_model.cpp - :language: cpp - :fragment: [export_compiled_model] - -.. dropdown:: TensorFlow integration (OVTF) - - | *New solution:* Direct model support and OpenVINO Converter (OVC) - | *Old solution:* discontinued in OpenVINO 2023.0 - | - | OpenVINO now features a native TensorFlow support, with no need for explicit model - conversion. 
- diff --git a/docs/articles_en/documentation/legacy-features/install-dev-tools.rst b/docs/articles_en/documentation/legacy-features/install-dev-tools.rst deleted file mode 100644 index 4b0160e11c9082..00000000000000 --- a/docs/articles_en/documentation/legacy-features/install-dev-tools.rst +++ /dev/null @@ -1,259 +0,0 @@ -Install OpenVINO™ Development Tools -===================================== - - -.. meta:: - :description: Learn how to install OpenVINO™ Development Tools on Windows, - Linux, and macOS operating systems, using a PyPi package. - -OpenVINO Development Tools is a set of utilities that make it easy to develop and -optimize models and applications for OpenVINO. It provides the following tools: - -* Model conversion API -* Benchmark Tool -* Accuracy Checker and Annotation Converter -* Model Downloader and other Open Model Zoo tools - -The instructions on this page show how to install OpenVINO Development Tools. If you are a -Python developer, it only takes a few simple steps to install the tools with PyPI. If you -are developing in C/C++, OpenVINO Runtime must be installed separately before installing -OpenVINO Development Tools. - -In both cases, Python 3.9 - 3.12 needs to be installed on your system before starting. - -.. note:: - - From the 2022.1 release, the OpenVINO™ Development Tools can only be installed via PyPI. - -.. _python_developers: - -For Python Developers -##################### - -If you are a Python developer, follow the steps in the -:ref:`Installing OpenVINO Development Tools ` section on this page to -install it. Installing OpenVINO Development Tools will also install OpenVINO Runtime as -a dependency, so you don’t need to install OpenVINO Runtime separately. This option is -recommended for new users. - -.. 
_cpp_developers: - -For C/C++ Developers -####################### - -If you are a C/C++ developer, you must first install OpenVINO Runtime separately to set -up the C/C++ libraries, sample code, and dependencies for building applications with -OpenVINO. These files are not included with the PyPI distribution. See the -:doc:`Selector Tool <../../get-started/install-openvino>` page to install OpenVINO Runtime -from an archive file for your operating system. - -Once OpenVINO Runtime is installed, you may install OpenVINO Development Tools for access -to tools like ``mo``, Model Downloader, Benchmark Tool, and other utilities that will help -you optimize your model and develop your application. Follow the steps in the -:ref:`Installing OpenVINO Development Tools ` section on this page -to install it. - -.. _install_dev_tools: - -Installing OpenVINO™ Development Tools -###################################### - -Follow these step-by-step instructions to install OpenVINO Development Tools on your computer. -There are two options to install OpenVINO Development Tools: installation into an existing -environment with a deep learning framework that was used for model training or creation; -or installation into a new environment. - -Installation into an Existing Environment with the Source Deep Learning Framework -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -To install OpenVINO Development Tools (see the :ref:`Install the Package ` -section of this article) into an existing environment with the deep learning framework used -for the model training or creation, run the following command: - -.. 
code-block:: sh - - pip install openvino-dev - - -Installation in a New Environment -+++++++++++++++++++++++++++++++++ - -If you do not have an environment with a deep learning framework for the input model or you -encounter any compatibility issues between OpenVINO and your version of deep learning -framework, you may install OpenVINO Development Tools with validated versions of -frameworks into a new environment. - -Step 1. Set Up Python Virtual Environment ------------------------------------------ - -Create a virtual Python environment to avoid dependency conflicts. To create a virtual -environment, use the following command: - -.. tab-set:: - - .. tab-item:: Windows - :sync: windows - - .. code-block:: sh - - python -m venv openvino_env - - .. tab-item:: Linux and macOS - :sync: linux-and-macos - - .. code-block:: sh - - python3 -m venv openvino_env - - - -Step 2. Activate Virtual Environment ------------------------------------- - -Activate the newly created Python virtual environment by issuing this command: - -.. tab-set:: - - .. tab-item:: Windows - :sync: windows - - .. code-block:: sh - - openvino_env\Scripts\activate - - .. tab-item:: Linux and macOS - :sync: linux-and-macos - - .. code-block:: sh - - source openvino_env/bin/activate - -.. important:: - - The above command must be re-run every time a new command terminal window is opened. - - -Step 3. Set Up and Update PIP to the Highest Version ----------------------------------------------------- - -Make sure `pip` is installed in your environment and upgrade it to the latest version by -issuing the following command: - -.. code-block:: sh - - python -m pip install --upgrade pip - - -.. _install_the_package: - -Step 4. Install the Package ---------------------------- - -To install and configure the components of the development package together with validated -versions of specific frameworks, use the commands below. - -.. 
code-block:: sh - - pip install openvino-dev[extras] - - -where the ``extras`` parameter specifies the source deep learning framework for the input model -and is one or more of the following values separated with "," : ``onnx``, ``pytorch``, -``tensorflow``, ``tensorflow2``. - -For example, to install and configure dependencies required for working with TensorFlow 2.x -and ONNX models, use the following command: - -.. code-block:: sh - - pip install openvino-dev[tensorflow2,onnx] - - -.. note:: - - Model conversion API support for TensorFlow 1.x environment has been deprecated. Use the - ``tensorflow2`` parameter to install a TensorFlow 2.x environment that can convert both - TensorFlow 1.x and 2.x models. If your model isn't compatible with the TensorFlow 2.x - environment, use the `tensorflow` parameter to install the TensorFlow 1.x environment. - The TF 1.x environment is provided only for legacy compatibility reasons. - -For more details on the openvino-dev PyPI package, see -`pypi.org `__ . - -Step 5. Test the Installation ------------------------------- - -To verify the package is properly installed, run the command below (this may take a few seconds): - -.. code-block:: sh - - mo -h - -You will see the help message for ``mo`` if installation finished successfully. If you get an -error, refer to the :doc:`Troubleshooting Guide <../../get-started/troubleshooting-install-config>` -for possible solutions. - -Congratulations! You finished installing OpenVINO Development Tools with C/C++ capability. -Now you can start exploring OpenVINO's functionality through example C/C++ applications. -See the "What's Next?" section to learn more! - -What's Next? -############ - -Learn more about OpenVINO and use it in your own application by trying out some of these examples! - -Get started with Python -+++++++++++++++++++++++ - -.. 
image:: ../../assets/images/get_started_with_python.gif - :width: 400 - -Try the `Python Quick Start Example <../../notebooks/vision-monodepth-with-output.html>`__ -to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook -inside your web browser. - -Visit the :doc:`Tutorials <../../learn-openvino/interactive-tutorials-python>` page for more -Jupyter Notebooks to get you started with OpenVINO, such as: - -* `OpenVINO Python API Tutorial <../../notebooks/openvino-api-with-output.html>`__ -* `Basic image classification program with Hello Image Classification <../../notebooks/hello-world-with-output.html>`__ -* `Convert a PyTorch model and use it for image background removal <../../notebooks/vision-background-removal-with-output.html>`__ - -Get started with C++ -++++++++++++++++++++ - -.. image:: ../../assets/images/get_started_with_cpp.jpg - :width: 400 - - -Try the :doc:`C++ Quick Start Example <../../learn-openvino/openvino-samples/get-started-demos>` -for step-by-step instructions on building and running a basic image classification C++ application. - -Visit the :doc:`Samples <../../learn-openvino/openvino-samples>` page for other C++ -example applications to get you started with OpenVINO, such as: - -* :doc:`Basic object detection with the Hello Reshape SSD C++ sample <../../learn-openvino/openvino-samples/hello-reshape-ssd>` -* :doc:`Object classification sample <../../learn-openvino/openvino-samples/hello-classification>` - -Learn OpenVINO Development Tools -++++++++++++++++++++++++++++++++ - -* Explore a variety of pre-trained deep learning models in the - :doc:`Open Model Zoo ` and deploy them in demo applications to see how they work. - - .. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - -* Want to import a model from another framework and optimize its performance with OpenVINO? - Visit the :doc:`Convert a Model ` page. 
-* Accelerate your model's speed even further with quantization and other compression techniques - using :doc:`Neural Network Compression Framework (NNCF) <../../openvino-workflow/model-optimization-guide/quantizing-models-post-training>`. -* Benchmark your model's inference speed with one simple command using the - :doc:`Benchmark Tool <../../learn-openvino/openvino-samples/benchmark-tool>`. - -Additional Resources -#################### - -- `Intel® Distribution of OpenVINO™ toolkit home page `__ diff --git a/docs/articles_en/documentation/legacy-features/model-zoo.rst b/docs/articles_en/documentation/legacy-features/model-zoo.rst deleted file mode 100644 index 4b761e6c7df831..00000000000000 --- a/docs/articles_en/documentation/legacy-features/model-zoo.rst +++ /dev/null @@ -1,31 +0,0 @@ -Model Zoo -========= - -.. _model zoo: - -.. note:: - - Since the deprecation of Open Model Zoo, OpenVINO has significantly extended its presence on the - `Hugging Face `__ model repository. It is currently - the recommended source of optimized OpenVINO IR models. - -Open Model Zoo for OpenVINO™ toolkit delivers a wide variety of free, pre-trained deep learning -models and demo applications that provide full application templates to help you implement deep -learning in Python, C++, or OpenCV Graph API (G-API). - -Models, demos and full documentation are available in the -`Open Model Zoo GitHub repo `__ -and licensed under Apache License Version 2.0. - -Browse through over 200 neural network models, both -`public `__ and from -`Intel `__, and pick the right one for your solution. -Types include object detection, classification, image segmentation, handwriting recognition, -text to speech, pose estimation, and others. The Intel models have already been converted -to work with OpenVINO™ toolkit, while public models can easily be converted using the -:doc:`OpenVINO Model Conversion API <../../openvino-workflow/model-preparation>` utility. 
- -Open Model Zoo offers a -`comprehensive set of demos `__ that you can adapt for implementing specific deep -learning scenarios in your applications. - diff --git a/docs/articles_en/documentation/legacy-features/multi-device.rst b/docs/articles_en/documentation/legacy-features/multi-device.rst deleted file mode 100644 index 594f496287d714..00000000000000 --- a/docs/articles_en/documentation/legacy-features/multi-device.rst +++ /dev/null @@ -1,155 +0,0 @@ -Multi-device execution -====================== - - -.. meta:: - :description: The Multi-Device execution mode in OpenVINO Runtime assigns - multiple available computing devices to particular inference - requests to execute in parallel. - -.. danger:: - - The Multi-device execution mode described here has been **deprecated**. - - It's functionality is now fully covered by the :ref:`CUMULATIVE_THROUGHPUT ` - option of the :doc:`Automatic Device Selection <../../openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection>` mode. - This way, all available devices in the system can be used without the need to specify them. - -How MULTI Works -#################### - -The Multi-Device execution mode, or MULTI for short, acts as a "virtual" or a "proxy" device, which does not bind to a specific type of hardware. Instead, it assigns available computing devices to particular inference requests, which are then executed in parallel. - -The potential gains from using Multi-Device execution are: - -* improved throughput from using multiple devices at once, -* increase in performance stability due to multiple devices sharing inference workload. - -Importantly, the Multi-Device mode does not change the application logic, so it does not require you to explicitly compile the model on every device or create and balance inference requests. It appears to use a typical device but internally handles the actual hardware. 
- -Note that the performance increase in this mode comes from utilizing multiple devices at once. This means that you need to provide the devices with enough inference requests to keep them busy, otherwise you will not benefit much from using MULTI. - - -Using the Multi-Device Mode -########################### - -Following the OpenVINO™ naming convention, the Multi-Device mode is assigned the label of “MULTI.” The only configuration option available for it is a prioritized list of devices to use: - - -+----------------------------+---------------------------------+------------------------------------------------------------+ -| Property | Property values | Description | -+============================+=================================+============================================================+ -| | | MULTI: | | Specifies the devices available for selection. | -| | | comma-separated, no spaces | | The device sequence will be taken as priority | -+----------------------------+---------------------------------+ | from high to low. | -| ``ov::device::priorities`` | | device names | | Priorities can be set directly as a string. | -| | | comma-separated, no spaces | | -+----------------------------+---------------------------------+------------------------------------------------------------+ - - -Specifying the device list explicitly is required by MULTI, as it defines the devices available for inference and sets their priorities. - -Note that OpenVINO™ Runtime enables you to use “GPU” as an alias for “GPU.0” in function calls. -More details on enumerating devices can be found in :doc:`Inference Devices and Modes <../../openvino-workflow/running-inference/inference-devices-and-modes>`. - -The following commands are accepted by the API: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/articles_en/assets/snippets/ov_multi.py - :language: python - :fragment: [MULTI_0] - - .. tab-item:: C++ - :sync: cpp - - .. 
doxygensnippet:: docs/articles_en/assets/snippets/MULTI0.cpp - :language: cpp - :fragment: [part0] - - -To check what devices are present in the system, you can use the Device API. For information on how to do it, check :doc:`Query device properties and configuration <../../openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties>`. - - -Configuring Individual Devices and Creating the Multi-Device On Top -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -As mentioned previously, executing inference with MULTI may be set up by configuring individual devices before creating the "MULTI" device on top. It may be considered for performance reasons. - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/articles_en/assets/snippets/ov_multi.py - :language: python - :fragment: [MULTI_4] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/articles_en/assets/snippets/MULTI4.cpp - :language: cpp - :fragment: [part4] - - -Alternatively, you can combine all the individual device settings into a single config file and load it for MULTI to parse. See the code example in the next section. - -Querying the Optimal Number of Inference Requests -+++++++++++++++++++++++++++++++++++++++++++++++++ - -When using MULTI, you don't need to sum over included devices yourself, you can query the optimal number of requests directly, -using the :doc:`configure devices <../../openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties>` property: - -.. tab-set:: - - .. tab-item:: C++ - - .. 
doxygensnippet:: docs/articles_en/assets/snippets/MULTI5.cpp - :language: cpp - :fragment: [part5] - - -Using the Multi-Device with OpenVINO Samples and Benchmarking Performance -######################################################################### - -To see how the Multi-Device execution is used in practice and test its performance, take a look at OpenVINO's Benchmark Application which presents the optimal performance of the plugin without the need for additional settings, like the number of requests or CPU threads. -Here is an example command to evaluate performance of CPU + GPU: - -.. code-block:: sh - - ./benchmark_app –d MULTI:CPU,GPU –m -i -niter 1000 - - -For more information, refer to the :doc:`Benchmark Tool <../../../learn-openvino/openvino-samples/benchmark-tool>` article. - - -.. note:: - - You can keep using the FP16 IR without converting it to FP32, even if some of the listed devices do not support it. The conversion will be done automatically for you. - - No demos are yet fully optimized for MULTI, by means of supporting the ``ov::optimal_number_of_infer_requests`` property, using the GPU streams/throttling, and so on. - - -Performance Considerations for the Multi-Device Execution -######################################################### - -For best performance when using the MULTI execution mode you should consider a few recommendations: - -- MULTI usually performs best when the fastest device is specified first in the device candidate list. This is particularly important when the request-level parallelism is not sufficient (e.g. the number of requests is not enough to saturate all devices). -- Just like with any throughput-oriented execution mode, it is highly recommended to query the optimal number of inference requests directly from the instance of the ``ov:compiled_model``. Refer to the code of the previously mentioned ``benchmark_app`` for more details. 
-- Execution on certain device combinations, for example CPU+GPU, performs better with certain knobs. Refer to the ``benchmark_app`` code for details. One specific example is disabling GPU driver polling, which in turn requires multiple GPU streams to balance out slower communication of inference completion from the device to the host. -- The MULTI logic always attempts to save on copying data between device-agnostic and user-facing inference requests, and device-specific 'worker' requests that are being actually scheduled behind the scene. To facilitate the copy savings, it is recommended to run the requests in the order in which they were created. -- While performance of accelerators combines well with MULTI, the CPU+GPU execution may introduce certain performance issues. It is due to the devices sharing some resources, like power or bandwidth. Enabling the GPU throttling hint, which saves a CPU thread for CPU inference, is an example of a recommended solution addressing this issue. - - -Additional Resources -#################### - -- :doc:`Inference Devices and Modes <../../openvino-workflow/running-inference/inference-devices-and-modes>` -- :doc:`Automatic Device Selection <../../openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection>` - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api.rst deleted file mode 100644 index e031c10e7e4e08..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api.rst +++ /dev/null @@ -1,863 +0,0 @@ -Transition from Legacy Conversion API -===================================== - - -.. meta:: - :description: Transition guide from MO / mo.convert_model() to OVC / ov.convert_model(). - -.. 
toctree:: - :maxdepth: 1 - :hidden: - - transition-legacy-conversion-api/legacy-conversion-api - transition-legacy-conversion-api/legacy-model-optimizer-extensibility - -In the 2023.1 OpenVINO release OpenVINO Model Converter was introduced with the corresponding -Python API: ``openvino.convert_model`` method. ``ovc`` and ``openvino.convert_model`` represent -a lightweight alternative of ``mo`` and ``openvino.tools.mo.convert_model`` which are considered -legacy API now. In this article, all the differences between ``mo`` and ``ovc`` are summarized -and the transition guide from the legacy API to the new API is provided. - -Parameters Comparison -##################### - -The comparison of parameters between ov.convert_model() / OVC and mo.convert_model() / MO. - -.. list-table:: - :widths: 20 25 55 - :header-rows: 1 - - * - mo.convert_model() / MO - - ov.convert_model() / OVC - - Differences description - * - input_model - - input_model - - Along with model object or path to input model ov.convert_model() accepts list of model parts, for example, the path to TensorFlow weights plus the path to TensorFlow checkpoint. OVC tool accepts an unnamed input model. - * - output_dir - - output_model - - output_model in OVC tool sets both output model name and output directory. - * - model_name - - output_model - - output_model in OVC tool sets both output model name and output directory. - * - input - - input - - ov.convert_model() accepts tuples for setting multiple parameters. OVC tool 'input' does not have type setting and freezing functionality. ov.convert_model() does not allow input cut. - * - output - - output - - ov.convert_model() does not allow output cut. - * - input_shape - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by ``input`` parameter. - * - example_input - - example_input - - No differences. - * - batch - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by model reshape functionality. See details below. 
- * - mean_values - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - scale_values - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - scale - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - reverse_input_channels - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - source_layout - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - target_layout - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - layout - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - compress_to_fp16 - - compress_to_fp16 - - OVC provides 'compress_to_fp16' for command line tool only, as compression is performed during saving a model to IR (Intermediate Representation). - * - extensions - - extension - - No differences. - * - transform - - N/A - - Not available in ov.convert_model() / OVC. Can be replaced by functionality from ``PrePostProcessor``. See details below. - * - transformations_config - - N/A - - Not available in ov.convert_model() / OVC. - * - static_shape - - N/A - - Not available in ov.convert_model() / OVC. - * - freeze_placeholder_with_value - - N/A - - Not available in ov.convert_model() / OVC. - * - use_legacy_frontend - - N/A - - Not available in ov.convert_model() / OVC. - * - use_legacy_frontend - - N/A - - Not available in ov.convert_model() / OVC. 
- * - silent - - verbose - - OVC / ov.convert_model provides 'verbose' parameter instead of 'silent' for printing of detailed conversion information if 'verbose' is set to True. - * - log_level - - N/A - - Not available in ov.convert_model() / OVC. - * - version - - version - - N/A - * - progress - - N/A - - Not available in ov.convert_model() / OVC. - * - stream_output - - N/A - - Not available in ov.convert_model() / OVC. - * - share_weights - - share_weights - - No differences. - * - framework - - N/A - - Not available in ov.convert_model() / OVC. - * - help / -h - - help / -h - - OVC provides help parameter only in command line tool. - * - example_output - - output - - OVC / ov.convert_model 'output' parameter includes capabilities of MO 'example_output' parameter. - * - input_model_is_text - - N/A - - Not available in ov.convert_model() / OVC. - * - input_checkpoint - - input_model - - All supported model formats can be passed to 'input_model'. - * - input_meta_graph - - input_model - - All supported model formats can be passed to 'input_model'. - * - saved_model_dir - - input_model - - All supported model formats can be passed to 'input_model'. - * - saved_model_tags - - N/A - - Not available in ov.convert_model() / OVC. - * - tensorflow_custom_operations_config_update - - N/A - - Not available in ov.convert_model() / OVC. - * - tensorflow_object_detection_api_pipeline_config - - N/A - - Not available in ov.convert_model() / OVC. - * - tensorboard_logdir - - N/A - - Not available in ov.convert_model() / OVC. - * - tensorflow_custom_layer_libraries - - N/A - - Not available in ov.convert_model() / OVC. - * - input_symbol - - N/A - - Not available in ov.convert_model() / OVC. - * - nd_prefix_name - - N/A - - Not available in ov.convert_model() / OVC. - * - pretrained_model_name - - N/A - - Not available in ov.convert_model() / OVC. - * - save_params_from_nd - - N/A - - Not available in ov.convert_model() / OVC. 
- * - legacy_mxnet_model - - N/A - - Not available in ov.convert_model() / OVC. - * - enable_ssd_gluoncv - - N/A - - Not available in ov.convert_model() / OVC. - * - input_proto - - N/A - - Not available in ov.convert_model() / OVC. - * - caffe_parser_path - - N/A - - Not available in ov.convert_model() / OVC. - * - k - - N/A - - Not available in ov.convert_model() / OVC. - * - disable_omitting_optional - - N/A - - Not available in ov.convert_model() / OVC. - * - enable_flattening_nested_params - - N/A - - Not available in ov.convert_model() / OVC. - * - counts - - N/A - - Not available in ov.convert_model() / OVC. - * - remove_output_softmax - - N/A - - Not available in ov.convert_model() / OVC. - * - remove_memory - - N/A - - Not available in ov.convert_model() / OVC. - -Transition from Legacy API to New API -############################################################################ - -mo.convert_model() provides a wide range of preprocessing parameters. Most of these parameters have analogs in OVC or can be replaced with functionality from ``ov.PrePostProcessor`` class. -Here is the guide to transition from legacy model preprocessing to new API preprocessing. - - -``input_shape`` -################ - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, input_shape=[[1, 3, 100, 100],[1]]) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model, input=[[1, 3, 100, 100],[1]]) - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --input_shape [1,3,100,100],[1] --output_dir OUTPUT_DIR - - - .. code-block:: sh - :force: - - ovc MODEL_NAME --input [1,3,100,100],[1] --output_model OUTPUT_MODEL - -``batch`` -########## - -.. 
tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, batch=2) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - input_shape = ov_model.inputs[0].partial_shape - input_shape[0] = 2 # batch size - ov_model.reshape(input_shape) - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --batch 2 --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -``mean_values`` -################ - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, mean_values=[0.5, 0.5, 0.5]) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - - prep = ov.preprocess.PrePostProcessor(ov_model) - prep.input(input_name).tensor().set_layout(ov.Layout("NHWC")) - prep.input(input_name).preprocess().mean([0.5, 0.5, 0.5]) - ov_model = prep.build() - - There is currently no heuristic for automatic detection of the channel to which mean, scale or reverse channels should be applied. ``Layout`` needs to be explicitly specified with "C" channel. For example "NHWC", "NCHW", "?C??". See also :doc:`Layout API overview <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview>`. - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --mean_values [0.5,0.5,0.5] --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. 
- -``scale_values`` -################# - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, scale_values=[255., 255., 255.]) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - - prep = ov.preprocess.PrePostProcessor(ov_model) - prep.input(input_name).tensor().set_layout(ov.Layout("NHWC")) - prep.input(input_name).preprocess().scale([255., 255., 255.]) - ov_model = prep.build() - - There is currently no heuristic for automatic detection of the channel to which mean, scale or reverse channels should be applied. ``Layout`` needs to be explicitly specified with "C" channel. For example "NHWC", "NCHW", "?C??". See also :doc:`Layout API overview <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview>`. - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --scale_values [255,255,255] --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -``reverse_input_channels`` -########################### - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, reverse_input_channels=True) - - - .. 
code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - - prep = ov.preprocess.PrePostProcessor(ov_model) - prep.input(input_name).tensor().set_layout(ov.Layout("NHWC")) - prep.input(input_name).preprocess().reverse_channels() - ov_model = prep.build() - - There is currently no heuristic for automatic detection of the channel to which mean, scale or reverse channels should be applied. ``Layout`` needs to be explicitly specified with "C" channel. For example "NHWC", "NCHW", "?C??". See also :doc:`Layout API overview <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview>`. - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --reverse_input_channels --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -``source_layout`` -################## - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - import openvino as ov - from openvino.tools import mo - - ov_model = mo.convert_model(model, source_layout={input_name: ov.Layout("NHWC")}) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - - prep = ov.preprocess.PrePostProcessor(ov_model) - prep.input(input_name).model().set_layout(ov.Layout("NHWC")) - ov_model = prep.build() - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --source_layout input_name(NHWC) --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -``target_layout`` -################## - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. 
code-block:: py - :force: - - import openvino as ov - from openvino.tools import mo - - ov_model = mo.convert_model(model, target_layout={input_name: ov.Layout("NHWC")}) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - - prep = ov.preprocess.PrePostProcessor(ov_model) - prep.input(input_name).tensor().set_layout(ov.Layout("NHWC")) - ov_model = prep.build() - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --target_layout input_name(NHWC) --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -``layout`` -########### - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, layout={input_name: mo.LayoutMap("NCHW", "NHWC")}) - - - .. code-block:: py - :force: - - import openvino as ov - - ov_model = ov.convert_model(model) - - prep = ov.preprocess.PrePostProcessor(ov_model) - prep.input(input_name).model().set_layout(ov.Layout("NCHW")) - prep.input(input_name).tensor().set_layout(ov.Layout("NHWC")) - ov_model = prep.build() - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --layout "input_name(NCHW->NHWC)" --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -``transform`` -############## - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. 
code-block:: py - :force: - - from openvino.tools import mo - - ov_model = mo.convert_model(model, transform=[('LowLatency2', {'use_const_initializer': False}), 'Pruning', ('MakeStateful', {'param_res_names': {'input_name': 'output_name'}})]) - - - .. code-block:: py - :force: - - import openvino as ov - from openvino._offline_transformations import apply_low_latency_transformation, apply_pruning_transformation, apply_make_stateful_transformation - - ov_model = ov.convert_model(model) - apply_low_latency_transformation(model, use_const_initializer=False) - apply_pruning_transformation(model) - apply_make_stateful_transformation(model, param_res_names={'input_name': 'output_name'}) - - .. tab-item:: CLI - :sync: cli - - .. list-table:: - :header-rows: 1 - - * - Legacy API - - New API - * - .. code-block:: sh - :force: - - mo --input_model MODEL_NAME --transform LowLatency2[use_const_initializer=False],Pruning,MakeStateful[param_res_names={'input_name':'output_name'}] --output_dir OUTPUT_DIR - - - Not available in OVC tool. Switch to the **Python** tab. - -Cutting Off Parts of a Model -############################ - -Performing surgery by cutting model inputs and outputs from a model is no longer available in the new conversion API. Instead, we recommend performing the cut in the original framework. -Below are examples of model cutting of TensorFlow protobuf, TensorFlow SavedModel, and ONNX formats with the legacy conversion API, compared to achieving the same cut with tools provided by the Tensorflow and ONNX frameworks. -For PyTorch, TensorFlow 2 Keras, and PaddlePaddle, we recommend changing the original model code to perform the model cut. - -Note: This guide does not cover the cutting a model by input port of an operation that MO tool provides using `input` and `output` options, for example, `--input 1:name_op`. - -``PyTorch`` -########### - -Model cut for PyTorch is not available in legacy API. 
- -When it is needed to remove a whole module from the model it is possible to replace such modules with `Identity`. Below is the example of removing `conv1` and `bn1` modules at the input and `fc` module at the output of the resnet50 model. - -.. code-block:: py - :force: - - import openvino as ov - import torch - import torchvision - from torch.nn import Identity - - # Load pretrained model - model = torchvision.models.resnet50(weights='DEFAULT') - - # input cut - model.conv1 = Identity() - model.bn1 = Identity() - - # output cut - model.fc = Identity() - - # convert and compile the model - ov_model = ov.convert_model(model, input=([-1,64,-1,-1], torch.float32)) - compiled_model = ov.compile_model(ov_model) - -When it is needed to remove one or more outputs from the model it is possible to create a wrapper for the model and only output the needed output. Below is the example of removing second output from the model. - -.. code-block:: py - :force: - - import openvino as ov - import torch - - # Example of model with multiple outputs - class Model(torch.nn.Module): - def __init__(self): - super(Model, self).__init__() - self.linear1 = torch.nn.Linear(100, 200) - self.activation1 = torch.nn.ReLU() - self.linear2 = torch.nn.Linear(200, 10) - self.activation2 = torch.nn.Sigmoid() - - def forward(self, x): - x = self.linear1(x) - x = self.activation1(x) - y = self.linear2(x) - y = self.activation2(y) - return x, y - - # New model, where some outputs are cut - class CutModel(torch.nn.Module): - def __init__(self): - super(CutModel, self).__init__() - self.model = Model() - - def forward(self, x): - - # get first output - x, _ = self.model(x) - - return x - - # Model with output cut - cut_model = CutModel() - - # convert and compile the model - ov_model = ov.convert_model(cut_model, input=([-1,-1,-1], torch.float32)) - compiled_model = ov.compile_model(ov_model) - - -``TensorFlow protobuf format / tf.Graph / tf.GraphDef`` 
-####################################################### - -Legacy API. - -.. code-block:: py - :force: - - import openvino as ov - import openvino.tools.mo as mo - - import tensorflow as tf - - def load_graph(model_path): - graph_def = tf.compat.v1.GraphDef() - with open(model_path, "rb") as f: - graph_def.ParseFromString(f.read()) - with tf.compat.v1.Graph().as_default() as graph: - tf.graph_util.import_graph_def(graph_def, name="") - return graph - - # Load TF model - graph = load_graph("/path_to_model/HugeCTR.pb") - - # Convert the model with input and output cut - input_name = "concat" - output_name = "MatVec_3/Squeeze" - ov_model = mo.convert_model(graph, input=(input_name, [-1, -1]), output=output_name) - - # Compile the model - compiled_model = ov.compile_model(ov_model) - -Model cut in original FW. - -.. code-block:: py - :force: - - import openvino as ov - import tensorflow as tf - - from tensorflow.python.tools.strip_unused_lib import strip_unused - - def load_graph(model_path): - graph_def = tf.compat.v1.GraphDef() - with open(model_path, "rb") as f: - graph_def.ParseFromString(f.read()) - with tf.compat.v1.Graph().as_default() as graph: - tf.graph_util.import_graph_def(graph_def, name="") - return graph - - # Load TF model - graph = load_graph("/path_to_model/HugeCTR.pb") - - # Cut the model - input_name = "concat" - output_name = "MatVec_3/Squeeze" - graph_def = graph.as_graph_def() - new_graph_def = strip_unused(graph_def, [input_name], [output_name], tf.float32.as_datatype_enum) - - # Convert and compile model - ov_model = ov.convert_model(new_graph_def, input=[-1, -1]) - cmp_model = ov.compile_model(ov_model) - - -``TensorFlow SavedModel format`` -################################ - -Model cut for SavedModel format is not available in legacy API. - -Example of model cut in original FW. - -.. 
code-block:: py - :force: - - import openvino as ov - import tensorflow_hub as hub - - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - from tensorflow.python.tools.strip_unused_lib import strip_unused - - # Load TF model - model = hub.load("https://tfhub.dev/svampeatlas/vision/embedder/fungi_V2/1?tf-hub-format=compressed") - - # Convert model to GraphDef - model_func = model.signatures["default"] - frozen_func = convert_variables_to_constants_v2(model_func) - graph_def = frozen_func.graph.as_graph_def() - - # Cut the model - input_name = 'InceptionV4/InceptionV4/Conv2d_2b_3x3/Relu' - output_name = 'InceptionV4/InceptionV4/Mixed_7c/concat' - new_graph_def = strip_unused(graph_def, [input_name], [output_name], tf.float32.as_datatype_enum) - - # Convert and compile the model - ov_model = ov.convert_model(new_graph_def) - compiled_model = ov.compile_model(ov_model) - - -``ONNX`` -######## - - -Legacy API. - -.. code-block:: py - :force: - - import openvino as ov - import openvino.tools.mo as mo - - input_path = "/path_to_model/yolov8x.onnx" - - # Convert model and perform input and output cut - input_name = "/model.2/Concat_output_0" - output_name = "/model.22/Concat_3_output_0" - ov_model = mo.convert_model(input_path, input=input_name, output=output_name) - - # Compile model - ov.compile_model(ov_model) - -Model cut in original FW. - -.. 
code-block:: py - :force: - - import onnx - import openvino as ov - - input_path = "/path_to_model/yolov8x.onnx" - - # Cut the model - input_name = "/model.2/Concat_output_0" - output_name = "/model.22/Concat_3_output_0" - cut_model_path = "/path_to_model/yolov8x_cut.onnx" - onnx.utils.extract_model(input_path, cut_model_path, [input_name], [output_name]) - - # Convert model - ov_model = ov.convert_model(cut_model_path) - - # Compile model - ov.compile_model(ov_model) - - -Supported Frameworks in MO vs OVC -################################# - -ov.convert_model() and OVC tool support conversion from PyTorch, TF, TF Lite, ONNX, PaddlePaddle. - - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.rst deleted file mode 100644 index 5302c7912995f6..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.rst +++ /dev/null @@ -1,188 +0,0 @@ -Legacy Conversion API -===================== - - -.. toctree:: - :maxdepth: 1 - :hidden: - - Setting Input Shapes - Troubleshooting Reshape Errors - Cutting Off Parts of a Model - Embedding Preprocessing Computation - Compressing a Model to FP16 - Convert Models Represented as Python Objects - Model Optimizer Frequently Asked Questions - Supported Model Formats - -.. meta:: - :description: Model conversion (MO) furthers the transition between training and - deployment environments, it adjusts deep learning models for - optimal execution on target devices. - -.. note:: - This part of the documentation describes a legacy approach to model conversion. Starting with OpenVINO 2023.1, a simpler alternative API for model conversion is available: ``openvino.convert_model`` and OpenVINO Model Converter ``ovc`` CLI tool. 
Refer to :doc:`Model preparation <../../../openvino-workflow/model-preparation>` for more details. If you are still using `openvino.tools.mo.convert_model` or `mo` CLI tool, you can still refer to this documentation. However, consider checking the :doc:`transition guide <../transition-legacy-conversion-api>` to learn how to migrate from the legacy conversion API to the new one. Depending on the model topology, the new API can be a better option for you. - -To convert a model to OpenVINO model format (``ov.Model``), you can use the following command: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model(INPUT_MODEL) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model INPUT_MODEL - - -If the out-of-the-box conversion (only the ``input_model`` parameter is specified) is not successful, use the parameters mentioned below to override input shapes and cut the model: - -- ``input`` and ``input_shape`` - the model conversion API parameters used to override original input shapes for model conversion, - - For more information about the parameters, refer to the :doc:`Setting Input Shapes ` guide. - -- ``input`` and ``output`` - the model conversion API parameters used to define new inputs and outputs of the converted model to cut off unwanted parts (such as unsupported operations and training sub-graphs), - - For a more detailed description, refer to the :doc:`Cutting Off Parts of a Model ` guide. - -- ``mean_values``, ``scale_values``, ``layout`` - the parameters used to insert additional input pre-processing sub-graphs into the converted model, - - For more details, see the :doc:`Embedding Preprocessing Computation ` article. 
- -- ``compress_to_fp16`` - a compression parameter in ``mo`` command-line tool, which allows generating IR with constants (for example, weights for convolutions and matrix multiplications) compressed to ``FP16`` data type. - - For more details, refer to the :doc:`Compression of a Model to FP16 ` guide. - -To get the full list of conversion parameters, run the following command: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model(help=True) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --help - - -Examples of model conversion parameters -####################################### - -Below is a list of separate examples for different frameworks and model conversion parameters: - -1. Launch model conversion for a TensorFlow MobileNet model in the binary protobuf format: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("MobileNet.pb") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model MobileNet.pb - - - Launch model conversion for a TensorFlow BERT model in the SavedModel format with three inputs. Specify input shapes explicitly where the batch size and the sequence length equal 2 and 30 respectively: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("BERT", input_shape=[[2,30],[2,30],[2,30]]) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --saved_model_dir BERT --input_shape [2,30],[2,30],[2,30] - - - For more information, refer to the :doc:`Converting a TensorFlow Model ` guide. - -2. Launch model conversion for an ONNX OCR model and specify new output explicitly: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("ocr.onnx", output="probabilities") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model ocr.onnx --output probabilities - - - For more information, refer to the :doc:`Converting an ONNX Model ` guide. - - .. note:: - - PyTorch models must be exported to the ONNX format before conversion into IR. More information can be found in :doc:`Converting a PyTorch Model `. - -3. Launch model conversion for a PaddlePaddle UNet model and apply mean-scale normalization to the input: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("unet.pdmodel", mean_values=[123,117,104], scale=255) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model unet.pdmodel --mean_values [123,117,104] --scale 255 - - - For more information, refer to the :doc:`Converting a PaddlePaddle Model ` guide. - -- To get conversion recipes for specific TensorFlow, ONNX, and PyTorch models, refer to the :doc:`Model Conversion Tutorials `. -- For more information about IR, see :doc:`Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™ <../../openvino-ir-format/operation-sets>`. 
-- For more information about support of neural network models trained with various frameworks, see :doc:`OpenVINO Extensibility Mechanism <../../openvino-extensibility>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-compressing-model-to-fp16.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-compressing-model-to-fp16.rst deleted file mode 100644 index c9e93036a3a7c2..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-compressing-model-to-fp16.rst +++ /dev/null @@ -1,53 +0,0 @@ -[LEGACY] Compressing a Model to FP16 -============================================= - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Conversion Parameters <../../../../openvino-workflow/model-preparation/conversion-parameters>` article. - -By default, when IR is saved all relevant floating-point weights are compressed to ``FP16`` data type during model conversion. -It results in creating a "compressed ``FP16`` model", which occupies about half of -the original space in the file system. The compression may introduce a minor drop in accuracy, -but it is negligible for most models. -If the accuracy drop is significant, the user can disable compression explicitly. - -To disable compression, use the ``compress_to_fp16=False`` option: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.runtime import save_model - ov_model = save_model(INPUT_MODEL, compress_to_fp16=False) - - .. 
tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model INPUT_MODEL --compress_to_fp16=False - - -For details on how plugins handle compressed ``FP16`` models, see -:doc:`Inference Devices and Modes <../../../../openvino-workflow/running-inference/inference-devices-and-modes>`. - -.. note:: - - ``FP16`` compression is sometimes used as the initial step for ``INT8`` quantization. - Refer to the :doc:`Post-training optimization <../../../../openvino-workflow/model-optimization-guide/quantizing-models-post-training>` guide for more - information about that. - - -.. note:: - - Some large models (larger than a few GB) when compressed to ``FP16`` may consume an overly large amount of RAM on the loading - phase of the inference. If that is the case for your model, try to convert it without compression: - ``convert_model(INPUT_MODEL, compress_to_fp16=False)`` or ``convert_model(INPUT_MODEL)`` - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-convert-models-as-python-objects.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-convert-models-as-python-objects.rst deleted file mode 100644 index 4921dc6bfa221f..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-convert-models-as-python-objects.rst +++ /dev/null @@ -1,150 +0,0 @@ -[LEGACY] Convert Models Represented as Python Objects -============================================================= - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Model Preparation <../../../../openvino-workflow/model-preparation>` article. - -Model conversion API is represented by ``convert_model()`` method in openvino.tools.mo namespace. ``convert_model()`` is compatible with types from openvino.runtime, like PartialShape, Layout, Type, etc. - -``convert_model()`` has the ability available from the command-line tool, plus the ability to pass Python model objects, such as a PyTorch model or TensorFlow Keras model directly, without saving them into files and without leaving the training environment (Jupyter Notebook or training scripts). In addition to input models consumed directly from Python, ``convert_model`` can take OpenVINO extension objects constructed directly in Python for easier conversion of operations that are not supported in OpenVINO. - -.. note:: - - Model conversion can be performed only when you install - :doc:`the development tools <../../../legacy-features/install-dev-tools>`, which provide - both the ``convert_model()`` method and ``mo`` command-line tool. - The functionality from this article is applicable for ``convert_model()`` only and it is - not present in command-line tool. - - -``convert_model()`` returns an openvino.runtime.Model object which can be compiled and inferred or serialized to IR. - -Example of converting a PyTorch model directly from memory: - -.. code-block:: py - :force: - - import torchvision - from openvino.tools.mo import convert_model - - model = torchvision.models.resnet50(weights='DEFAULT') - ov_model = convert_model(model) - -The following types are supported as an input model for ``convert_model()``: - -* PyTorch - ``torch.nn.Module``, ``torch.jit.ScriptModule``, ``torch.jit.ScriptFunction``. Refer to the :doc:`Converting a PyTorch Model <[legacy]-supported-model-formats/[legacy]-convert-pytorch>` article for more details. 
-* TensorFlow / TensorFlow 2 / Keras - ``tf.keras.Model``, ``tf.keras.layers.Layer``, ``tf.compat.v1.Graph``, ``tf.compat.v1.GraphDef``, ``tf.Module``, ``tf.function``, ``tf.compat.v1.session``, ``tf.train.checkpoint``. Refer to the :doc:`Converting a TensorFlow Model <[legacy]-supported-model-formats/[legacy]-convert-tensorflow>` article for more details. - -``convert_model()`` accepts all parameters available in the MO command-line tool. Parameters can be specified by Python classes or string analogs, similar to the command-line tool. - -Example of using native Python classes to set ``input_shape``, ``mean_values`` and ``layout``: - -.. code-block:: py - :force: - - from openvino.runtime import PartialShape, Layout - from openvino.tools.mo import convert_model - - ov_model = convert_model(model, input_shape=PartialShape([1,3,100,100]), mean_values=[127, 127, 127], layout=Layout("NCHW")) - -Example of using strings for setting ``input_shape``, ``mean_values`` and ``layout``: - -.. code-block:: py - :force: - - from openvino.runtime import Layout - from openvino.tools.mo import convert_model - - ov_model = convert_model(model, input_shape="[1,3,100,100]", mean_values="[127,127,127]", layout="NCHW") - - -The ``input`` parameter can be set by a ``tuple`` with a name, shape, and type. The input name of the type string is required in the tuple. The shape and type are optional. -The shape can be a ``list`` or ``tuple`` of dimensions (``int`` or ``openvino.runtime.Dimension``), or ``openvino.runtime.PartialShape``, or ``openvino.runtime.Shape``. The type can be of numpy type or ``openvino.runtime.Type``. - -Example of using a tuple in the ``input`` parameter to cut a model: - -.. code-block:: py - :force: - - from openvino.tools.mo import convert_model - - ov_model = convert_model(model, input=("input_name", [3], np.float32)) - -For complex cases, when a value needs to be set in the ``input`` parameter, the ``InputCutInfo`` class can be used. 
``InputCutInfo`` accepts four parameters: ``name``, ``shape``, ``type``, and ``value``. - -``InputCutInfo("input_name", [3], np.float32, [0.5, 2.1, 3.4])`` is equivalent of ``InputCutInfo(name="input_name", shape=[3], type=np.float32, value=[0.5, 2.1, 3.4])``. - -Supported types for ``InputCutInfo``: - -* name: ``string``. -* shape: ``list`` or ``tuple`` of dimensions (``int`` or ``openvino.runtime.Dimension``), ``openvino.runtime.PartialShape``, ``openvino.runtime.Shape``. -* type: ``numpy type``, ``openvino.runtime.Type``. -* value: ``numpy.ndarray``, ``list`` of numeric values, ``bool``. - -Example of using ``InputCutInfo`` to freeze an input with value: - -.. code-block:: py - :force: - - from openvino.tools.mo import convert_model, InputCutInfo - - ov_model = convert_model(model, input=InputCutInfo("input_name", [3], np.float32, [0.5, 2.1, 3.4])) - -To set parameters for models with multiple inputs, use ``list`` of parameters. -Parameters supporting ``list``: - -* input -* input_shape -* layout -* source_layout -* dest_layout -* mean_values -* scale_values - -Example of using lists to set shapes, types and layout for multiple inputs: - -.. code-block:: py - :force: - - from openvino.runtime import Layout - from openvino.tools.mo import convert_model, LayoutMap - - ov_model = convert_model(model, input=[("input1", [1,3,100,100], np.float32), ("input2", [1,3,100,100], np.float32)], layout=[Layout("NCHW"), LayoutMap("NCHW", "NHWC")]) - -``layout``, ``source_layout`` and ``dest_layout`` accept an ``openvino.runtime.Layout`` object or ``string``. - -Example of using the ``Layout`` class to set the layout of a model input: - -.. code-block:: py - :force: - - from openvino.runtime import Layout - from openvino.tools.mo import convert_model - - ov_model = convert_model(model, source_layout=Layout("NCHW")) - -To set both source and destination layouts in the ``layout`` parameter, use the ``LayoutMap`` class. 
``LayoutMap`` accepts two parameters: ``source_layout`` and ``target_layout``. - -``LayoutMap("NCHW", "NHWC")`` is equivalent to ``LayoutMap(source_layout="NCHW", target_layout="NHWC")``. - -Example of using the ``LayoutMap`` class to change the layout of a model input: - -.. code-block:: py - :force: - - from openvino.tools.mo import convert_model, LayoutMap - - ov_model = convert_model(model, layout=LayoutMap("NCHW", "NHWC")) - -Example of using the ``serialize`` method to save the converted model to OpenVINO IR: - -.. code-block:: py - :force: - - from openvino.runtime import serialize - - serialize(ov_model, "model.xml") - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-cutting-parts-of-a-model.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-cutting-parts-of-a-model.rst deleted file mode 100644 index 0406602a6e51fa..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-cutting-parts-of-a-model.rst +++ /dev/null @@ -1,585 +0,0 @@ -[LEGACY] Cutting Off Parts of a Model -================================================ - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - -Sometimes, it is necessary to remove parts of a model when converting it to OpenVINO IR. This chapter describes how to do it, using model conversion API parameters. Model cutting applies mostly to TensorFlow models, which is why TensorFlow will be used in this chapter's examples, but it may be also useful for other frameworks. 
- -Purpose of Model Cutting -######################## - -The following examples are the situations when model cutting is useful or even required: - -* A model has pre- or post-processing parts that cannot be translated to existing OpenVINO operations. -* A model has a training part that is convenient to be kept in the model but not used during inference. -* A model is too complex be converted at once, because it contains a lot of unsupported operations that cannot be easily implemented as custom layers. -* A problem occurs with model conversion or inference in OpenVINO™ Runtime. To identify the issue, limit the conversion scope by iterative search for problematic areas in the model. -* A single custom layer or a combination of custom layers is isolated for debugging purposes. - -.. note:: - - Internally, when you run model conversion API, it loads the model, goes through the topology, and tries to find each layer type in a list of known layers. Custom layers are layers that are not included in the list. If your topology contains such kind of layers, model conversion API classifies them as custom. - -Model conversion API parameters -############################### - -Model conversion API provides ``input`` and ``output`` command-line options to specify new entry and exit nodes, while ignoring the rest of the model: - -* ``input`` option accepts a list of layer names of the input model that should be treated as new entry points to the model. See the full list of accepted types for input on :doc:`Model Conversion Python API <[legacy]-convert-models-as-python-objects>` page. -* ``output`` option accepts a list of layer names of the input model that should be treated as new exit points from the model. - -The ``input`` option is required for cases unrelated to model cutting. 
For example, when the model contains several inputs and ``input_shape`` or ``mean_values`` options are used, the ``input`` option specifies the order of input nodes for correct mapping between multiple items provided in ``input_shape`` and ``mean_values`` and the inputs in the model. - -Model cutting is illustrated with the Inception V1 model, found in the ``models/research/slim`` repository. To proceed with this chapter, make sure you do the necessary steps to :doc:`prepare the model for model conversion <[legacy]-setting-input-shapes>`. - -Default Behavior without input and output -######################################### - -The input model is converted as a whole if neither ``input`` nor ``output`` command line options are used. All ``Placeholder`` operations in a TensorFlow graph are automatically identified as entry points. The ``Input`` layer type is generated for each of them. All nodes that have no consumers are automatically identified as exit points. - -For Inception_V1, there is one ``Placeholder``: input. If the model is viewed in TensorBoard, the input operation is easy to find: - -.. image:: ../../../../assets/images/inception_v1_std_input.svg - :alt: Placeholder in Inception V1 - -``Reshape`` is the only output operation, which is enclosed in a nested name scope of ``InceptionV1/Logits/Predictions``, under the full name of ``InceptionV1/Logits/Predictions/Reshape_1``. - -In TensorBoard, along with some of its predecessors, it looks as follows: - -.. image:: ../../../../assets/images/inception_v1_std_output.svg - :alt: TensorBoard with predecessors - -Convert this model to ``ov.Model``: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1) - - .. tab-item:: CLI - :sync: cli - - .. 
code-block:: sh - - mo --input_model inception_v1.pb -b 1 --output_dir - - -``ov.Model`` can be serialized with the ``ov.serialize()`` method to Intermediate Representation which can be used for model structure exploring. -In IR, the structure of a model has the following layers: - -.. code-block:: xml - :force: - - - - - 1 - 3 - 224 - 224 - - - - - -The ``input`` layer is converted from the TensorFlow graph ``Placeholder`` operation ``input`` and has the same name. - -The ``-b`` option is used here for conversion to override a possible undefined batch size (coded as -1 in TensorFlow models). If a model was frozen with a defined batch size, you may omit this option in all the examples. - -The last layer in the model is ``InceptionV1/Logits/Predictions/Reshape_1``, which matches an output operation in the TensorFlow graph: - -.. code-block:: xml - :force: - - - - - - 1 - 1001 - - - - - 1 - 1001 - - - - - -Due to automatic identification of inputs and outputs, providing the ``input`` and ``output`` options to convert the whole model is not required. The following commands are equivalent for the Inception V1 model: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1) - - ov_model = convert_model("inception_v1.pb", batch=1, input="input", output="InceptionV1/Logits/Predictions/Reshape_1") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --output_dir - - mo --input_model inception_v1.pb -b 1 --input input --output InceptionV1/Logits/Predictions/Reshape_1 --output_dir - - -The Intermediate Representations are identical for both conversions. The same is true if the model has multiple inputs and/or outputs. - -Model Cutting -#################### - -Now, consider how to cut some parts of the model off. 
This chapter describes the first convolution block ``InceptionV1/InceptionV1/Conv2d_1a_7x7`` of the Inception V1 model to illustrate cutting: - -.. image:: ../../../../assets/images/inception_v1_first_block.svg - :alt: Inception V1 first convolution block - -Cutting at the End -++++++++++++++++++++ - -If you want to cut your model at the end, you have the following options: - -1. The following command cuts off the rest of the model after the ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu``, making this node the last in the model: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir - - - The resulting Intermediate Representation has three layers: - - .. code-block:: xml - :force: - - - - - - - ... - - - - - - ... - - - ... - - - - - - - - - ... - - - ... - - - - - - - - - - - As shown in the TensorBoard picture, the original model has more nodes than its Intermediate Representation. Model conversion, using ``convert_model()``, consists of a set of model transformations, including fusing of batch normalization ``InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm`` with convolution ``InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution``, which is why it is not present in the final model. This is not an effect of the ``output`` option, it is the typical behavior of model conversion API for batch normalizations and convolutions. The effect of the ``output`` is that the ``ReLU`` layer becomes the last one in the converted model. - -2. The following command cuts the edge that comes from 0 output port of the ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`` and the rest of the model, making this node the last one in the model: - - .. 
tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu:0") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu:0 --output_dir - - - The resulting Intermediate Representation has three layers, which are the same as in the previous case: - - .. code-block:: xml - :force: - - - - - - - ... - - - - - - ... - - - ... - - - - - - - - - ... - - - ... - - - - - - - - - - - This type of cutting is useful for cutting multiple output edges. - -3. The following command cuts the edge that comes to 0 input port of the ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`` and the rest of the model including ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu``, deleting this node and making the previous node ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Conv2D`` the last in the model: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, output="0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --output=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir - - - The resulting Intermediate Representation has two layers, which are the same as the first two layers in the previous case: - - .. code-block:: xml - :force: - - - - - - - ... - - - - - - ... - - - ... - - - - - - - - - - - - - -Cutting from the Beginning -++++++++++++++++++++++++++ - -If you want to go further and cut the beginning of the model, leaving only the ``ReLU`` layer, you have the following options: - -1. Use the following parameters, where ``input`` and ``output`` specify the same node in the graph: - - .. tab-set:: - - .. 
tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu", input="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model=inception_v1.pb -b 1 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --input InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir - - - The resulting Intermediate Representation looks as follows: - - .. code-block:: xml - :force: - - - - - - - ... - - - - - ... - - - ... - - - - - - - - - - ``Input`` layer is automatically created to feed the layer that is converted from the node specified in ``input``, which is ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`` in this case. ``convert_model()`` does not replace the ``ReLU`` node by the ``Input`` layer. It produces such ``ov.Model`` to make the node the first executable node in the final Intermediate Representation. Therefore, model conversion creates enough ``Inputs`` to feed all input ports of the node that is passed in ``input``. - - Even though ``input_shape`` is not specified in the command line, the shapes for layers are inferred from the beginning of the original TensorFlow model to the point, at which the new input is defined. It has the same shape ``[1,64,112,112]`` as the model converted as a whole or without cutting off the beginning. - -2. Cut the edge incoming to layer by port number. To specify the incoming port, use the following notation ``input=port:input_node``. To cut everything before ``ReLU`` layer, cut the edge incoming to port 0 of ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`` node: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, input="0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu", output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --input 0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir - - - The resulting Intermediate Representation looks as follows: - - .. code-block:: xml - :force: - - - - - - - ... - - - - - ... - - - ... - - - - - - - - - - ``Input`` layer is automatically created to feed the layer that is converted from the node specified in ``input``, which is ``InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`` in this case. ``convert_model()`` does not replace the ``ReLU`` node by the ``Input`` layer, it produces such ``ov.Model`` to make the node be the first executable node in the final Intermediate Representation. Therefore, ``convert_model()`` creates enough ``Inputs`` to feed all input ports of the node that is passed in ``input``. - - Even though ``input_shape`` is not specified in the command line, the shapes for layers are inferred from the beginning of the original TensorFlow model to the point, at which the new input is defined. It has the same shape ``[1,64,112,112]`` as the model converted as a whole or without cutting off the beginning. - -3. Cut edge outcoming from layer by port number. To specify the outcoming port, use the following notation ``input=input_node:port``. To cut everything before ``ReLU`` layer, cut edge from ``InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1`` node to ``ReLU``: - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, input="InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1:0", output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1:0 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir - - - The resulting Intermediate Representation looks as follows: - - .. code-block:: xml - :force: - - - - - - - ... - - - - - ... - - - ... - - layer> - - - - - - - -Inputs with Multiple Input Ports -################################ - -There are operations that contain more than one input port. In the example considered here, the convolution ``InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution`` is such operation. When ``input_shape`` is not provided, a new ``Input`` layer is created for each dynamic input port for the node. If a port is evaluated to a constant blob, this constant remains in the model and a corresponding input layer is not created. TensorFlow convolution used in this model contains two ports: - -* port 0: input tensor for convolution (dynamic) -* port 1: convolution weights (constant) - -Following this behavior, ``convert_model()`` creates an ``Input`` layer for port 0 only, leaving port 1 as a constant. Thus, the result of: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", batch=1, input="InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution") - - .. tab-item:: CLI - :sync: cli - - .. 
code-block:: sh - - mo --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --output_dir - - -is identical to the result of conversion of the model as a whole, because this convolution is the first executable operation in Inception V1. - -Different behavior occurs when ``input_shape`` is also used as an attempt to override the input shape: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", input="InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution", input_shape=[1,224,224,3]) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model inception_v1.pb--input=InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape [1,224,224,3] --output_dir - - -An error occurs (for more information, see the :ref:`Model Conversion FAQ `): - -.. code-block:: sh - - [ ERROR ] Node InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution has more than 1 input and input shapes were provided. - Try not to provide input shapes or specify input port with PORT:NODE notation, where PORT is an integer. - For more information, see FAQ #30 - -When ``input_shape`` is specified and the node contains multiple input ports, you need to provide an input port index together with an input node name. The input port index is specified in front of the node name with ``‘:’`` as a separator (``PORT:NODE``). In this case, the port index 0 of the node ``InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution`` should be specified as ``0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution``. - -The correct command line is: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("inception_v1.pb", input="0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution", input_shape=[1,224,224,3]) - - .. tab-item:: CLI - :sync: cli - - .. 
code-block:: sh - - mo --input_model inception_v1.pb --input 0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape=[1,224,224,3] --output_dir - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation.rst deleted file mode 100644 index 1e1fe61e717eb3..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation.rst +++ /dev/null @@ -1,253 +0,0 @@ -[LEGACY] Embedding Preprocessing Computation -===================================================== - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Conversion Parameters <../../../../openvino-workflow/model-preparation/conversion-parameters>` article. - -Input data for inference can be different from the training dataset and requires -additional preprocessing before inference. To accelerate the whole pipeline including -preprocessing and inference, model conversion API provides special parameters such as ``mean_values``, -``scale_values``, ``reverse_input_channels``, and ``layout``. - -Based on these parameters, model conversion API generates OpenVINO IR with additionally inserted sub-graphs -to perform the defined preprocessing. This preprocessing block can perform mean-scale -normalization of input data, reverting data along channel dimension, and changing -the data layout. 
See the following sections for details on the parameters, or the -:doc:`Overview of Preprocessing API <../../../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing>` -for the same functionality in OpenVINO Runtime. - -Specifying Layout -################# - -You may need to set input layouts, as it is required by some preprocessing, for -example, setting a batch, applying mean or scales, and reversing input channels (BGR<->RGB). - -Layout defines the meaning of dimensions in shape and can be specified for both -inputs and outputs. Some preprocessing requires to set input layouts, for example, -setting a batch, applying mean or scales, and reversing input channels (BGR<->RGB). - -For the layout syntax, check the :doc:`Layout API overview <../../../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview>`. -To specify the layout, you can use the ``layout`` option followed by the layout value. - -For example, the following command specifies the ``NHWC`` layout for a Tensorflow -``nasnet_large`` model that was exported to the ONNX format: - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("tf_nasnet_large.onnx", layout="nhwc") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model tf_nasnet_large.onnx --layout nhwc - - -Additionally, if a model has more than one input or needs both input and output -layouts specified, you need to provide the name of each input or output to apply the layout. - -For example, the following command specifies the layout for an ONNX ``Yolo v3 Tiny`` -model with its first input ``input_1`` in ``NCHW`` layout and second input ``image_shape`` -having two dimensions: batch and size of the image expressed as the ``N?`` layout: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("yolov3-tiny.onnx", layout={"input_1": "nchw", "image_shape": "n?"}) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model yolov3-tiny.onnx --layout input_1(nchw),image_shape(n?) - - -Changing Model Layout -##################### - -Changing the model layout may be necessary if it differs from the one presented by input data. -Use either ``layout`` or ``source_layout`` with ``target_layout`` to change the layout. - -For example, for the same ``nasnet_large`` model mentioned previously, you can use -the following commands to provide data in the ``NCHW`` layout: - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("tf_nasnet_large.onnx", source_layout="nhwc", target_layout="nchw") - - ov_model = convert_model("tf_nasnet_large.onnx", layout="nhwc->nchw") - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model tf_nasnet_large.onnx --source_layout nhwc --target_layout nchw - - mo --input_model tf_nasnet_large.onnx --layout "nhwc->nchw" - - -Again, if a model has more than one input or needs both input and output layouts -specified, you need to provide the name of each input or output to apply the layout. - -For example, to provide data in the ``NHWC`` layout for the `Yolo v3 Tiny` model -mentioned earlier, use the following commands: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("yolov3-tiny.onnx", source_layout={"input_1": "nchw", "image_shape": "n?"}, target_layout={"input_1": "nhwc"}) - - ov_model = convert_model("yolov3-tiny.onnx", layout={"input_1": "nchw->nhwc", "image_shape": "n?"} - - .. tab-item:: CLI - :sync: cli - - .. 
code-block:: sh - - mo --input_model yolov3-tiny.onnx --source_layout "input_1(nchw),image_shape(n?)" --target_layout "input_1(nhwc)" - - mo --input_model yolov3-tiny.onnx --layout "input_1(nchw->nhwc),image_shape(n?)" - - -Specifying Mean and Scale Values -################################ - -Neural network models are usually trained with the normalized input data. This -means that the input data values are converted to be in a specific range, for example, -``[0, 1]`` or ``[-1, 1]``. Sometimes, the mean values (mean images) are subtracted -from the input data values as part of the preprocessing. - -There are two cases of how the input data preprocessing is implemented. - -* The input preprocessing operations are a part of a model. - - In this case, the application does not perform a separate preprocessing step: - everything is embedded into the model itself. ``convert_model()`` will generate the - ov.Model with required preprocessing operations, and no ``mean`` and - ``scale`` parameters are required. -* The input preprocessing operations are not a part of a model and the preprocessing - is performed within the application which feeds the model with input data. - - In this case, information about mean/scale values should be provided to ``convert_model()`` - to embed it to the generated ``ov.Model``. - -Model conversion API represented by ``convert_model()`` provides command-line parameters -to specify the values: ``mean_values``, ``scale_values``, ``scale``. Using these parameters, -model conversion API embeds the corresponding preprocessing block for mean-value -normalization of the input data and optimizes this block so that the preprocessing -takes negligible time for inference. - -For example, the following command runs model conversion for the PaddlePaddle UNet -model and applies mean-scale normalization to the input data: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("unet.pdmodel", mean_values=[123,117,104], scale=255) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model unet.pdmodel --mean_values [123,117,104] --scale 255 - - -Reversing Input Channels -######################## - -Sometimes, input images for your application can be of the RGB (or BGR) format -and the model is trained on images of the BGR (or RGB) format, which is in the -opposite order of color channels. In this case, it is important to preprocess the -input images by reverting the color channels before inference. - -To embed this preprocessing step into ``ov.Model``, model conversion API provides the -``reverse_input_channels`` command-line parameter to shuffle the color channels. - -The ``reverse_input_channels`` parameter can be used to preprocess the model -input in the following cases: - -* Only one dimension in the input shape has a size equal to ``3``. -* One dimension has an undefined size and is marked as ``C`` channel using ``layout`` parameters. - -Using the ``reverse_input_channels`` parameter, model conversion API embeds the corresponding -preprocessing block for reverting the input data along channel dimension and optimizes -this block so that the preprocessing takes only negligible time for inference. - -For example, the following command launches model conversion for the TensorFlow AlexNet -model and embeds the ``reverse_input_channel`` preprocessing block into OpenVINO IR: - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("alexnet.pb", reverse_input_channels=True) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model alexnet.pb --reverse_input_channels - - -.. 
note:: - - If both mean and scale values are specified, the mean is subtracted first and - then the scale is applied regardless of the order of options in the command-line. - Input values are *divided* by the scale value(s). If the ``reverse_input_channels`` - option is also used, ``reverse_input_channels`` will be applied first, then ``mean`` - and after that ``scale``. The data flow in the model looks as follows: - ``Parameter -> ReverseInputChannels -> Mean apply-> Scale apply -> the original body of the model``. - -Additional Resources -#################### - -* :doc:`Overview of Preprocessing API <../../../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-model-optimizer-faq.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-model-optimizer-faq.rst deleted file mode 100644 index f035101d715e9b..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-model-optimizer-faq.rst +++ /dev/null @@ -1,947 +0,0 @@ -[LEGACY] Model Optimizer Frequently Asked Questions -=========================================================== - - -.. important:: - - All of the issues below refer to :doc:`legacy functionalities <../legacy-model-optimizer-extensibility>`. - -If your question is not covered by the topics below, use the -`OpenVINO Support page `__, -where you can participate in a free forum discussion. - -.. warning:: - - Note that OpenVINO support for Apache MXNet, Caffe, and Kaldi has been discontinued. - -.. _question-1: - -Q1. What does the message "[ ERROR ]: Current caffe.proto does not contain field" mean? 
-##################################################################################################################################################### - -**A:** Internally, Model Optimizer uses a protobuf library to parse and load Caffe models. This library requires a file grammar and a generated parser. For a Caffe fallback, Model Optimizer uses a Caffe-generated parser for a Caffe-specific ``.proto`` file (which is usually located in the ``src/caffe/proto`` directory). Make sure that you install exactly the same version of Caffe (with Python interface) as that was used to create the model. - -If you just want to experiment with Model Optimizer and test a Python extension for working with your custom -layers without building Caffe, add the layer description to the ``caffe.proto`` file and generate a parser for it. - -For example, to add the description of the ``CustomReshape`` layer, which is an artificial layer not present in any ``caffe.proto`` files: - -1. Add the following lines to the ``caffe.proto`` file: - - .. code-block:: shell - - package mo_caffe; // To avoid conflict with Caffe system, it is highly recommended to specify different package name. - ... - message LayerParameter { - // Other layers parameters description. - ... - optional CustomReshapeParameter custom_reshape_param = 546; // 546 - ID is any number not present in caffe.proto. - } - // The lines from here to the end of the file are describing contents of this parameter. - message CustomReshapeParameter { - optional BlobShape shape = 1; // Just use the same parameter type as some other Caffe layers. - } - - -2. Generate a new parser: - - .. code-block:: shell - - cd /openvino/tools/mo/front/caffe/proto - python3 generate_caffe_pb2.py --input_proto /src/caffe/proto/caffe.proto - - - where ``PATH_TO_CUSTOM_CAFFE`` is the path to the root directory of custom Caffe. - -3. Now, Model Optimizer is able to load the model into memory and start working with your extensions if there are any. 
- - However, since your model has custom layers, you must register them as custom. To learn more about it, refer to the :doc:`[Legacy] Custom Layers in Model Optimizer <../legacy-model-optimizer-extensibility>`. - -.. _question-2: - -Q2. How do I create a bare caffemodel, if I have only prototxt? -##################################################################################################################################################### - -**A:** You need the Caffe Python interface. In this case, do the following: - -.. code-block:: shell - - python3 - import caffe - net = caffe.Net('/my_net.prototxt', caffe.TEST) - net.save('/my_net.caffemodel') - - -.. _question-3: - -Q3. What does the message "[ ERROR ]: Unable to create ports for node with id" mean? -##################################################################################################################################################### - -**A:** Most likely, Model Optimizer does not know how to infer output shapes of some layers in the given topology. -To lessen the scope, compile the list of layers that are custom for Model Optimizer: present in the topology, -absent in the :doc:`list of supported operations <../../../../about-openvino/compatibility-and-support/supported-operations>` for the target framework. -Then, refer to available options in the corresponding section in the :doc:`[Legacy] Custom Layers in Model Optimizer <../legacy-model-optimizer-extensibility>` page. - -.. _question-7: - -Q7. What does the message "Invalid proto file: there is neither 'layer' nor 'layers' top-level messages" mean? -##################################################################################################################################################### - -**A:** The structure of any Caffe topology is described in the ``caffe.proto`` file of any Caffe version. 
For example, the following ``.proto`` file in Model Optimizer is used by default: ``mo/front/caffe/proto/my_caffe.proto``, with the structure: - -.. code-block:: sh - - message NetParameter { - // ... some other parameters - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; - } - - -This means that any topology should contain layers as top-level structures in ``prototxt``. For example, see the `LeNet topology `__. - -.. _question-8: - -Q8. What does the message "Old-style inputs (via 'input_dims') are not supported. Please specify inputs via 'input_shape'" mean? -##################################################################################################################################################### - -**A:** The structure of any Caffe topology is described in the ``caffe.proto`` file for any Caffe version. For example, the following ``.proto`` file in Model Optimizer is used by default: ``mo/front/caffe/proto/my_caffe.proto``, with the structure: - -.. code-block:: sh - - message NetParameter { - - optional string name = 1; // consider giving the network a name - // DEPRECATED. See InputParameter. The input blobs to the network. - repeated string input = 3; - // DEPRECATED. See InputParameter. The shape of the input blobs. - repeated BlobShape input_shape = 8; - // 4D input dimensions -- deprecated. Use "input_shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - // ... other parameters - } - - -Therefore, the input layer of the provided model must be specified in one of the following styles: - -* - - .. 
code-block:: sh - - input: "data" - input_shape - { - dim: 1 - dim: 3 - dim: 227 - dim: 227 - } - - -* - - .. code-block:: sh - - input: "data" - input_shape - { - dim: 1 - dim: 3 - dim: 600 - dim: 1000 - } - input: "im_info" - input_shape - { - dim: 1 - dim: 3 - } - -* - - .. code-block:: sh - - layer - { - name: "data" - type: "Input" - top: "data" - input_param {shape: {dim: 1 dim: 3 dim: 600 dim: 1000}} - } - layer - { - name: "im_info" - type: "Input" - top: "im_info" - input_param {shape: {dim: 1 dim: 3}} - } - -* - - .. code-block:: sh - - input: "data" - input_dim: 1 - input_dim: 3 - input_dim: 500 - - -However, if your model contains more than one input, Model Optimizer is able to convert the model with inputs specified in one of the first three forms in the above list. The 4th form is not supported for multi-input topologies. - -.. _question-9: - -Q9. What does the message "Mean file for topologies with multiple inputs is not supported" mean? -##################################################################################################################################################### - -**A:** Model Optimizer does not support mean file processing for topologies with more than one input. In this case, you need to perform preprocessing of the inputs for a generated Intermediate Representation in OpenVINO Runtime to perform subtraction for every input of your multi-input model. See the :doc:`Overview of Preprocessing <../../../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing>` for details. - -.. _question-11: - -Q11. What does the message "Invalid prototxt file: value error" mean? -##################################################################################################################################################### - -**A:** There are multiple reasons why Model Optimizer does not accept a Caffe topology. See FAQs :ref:`#7 ` and :ref:`#20 `. - -.. _question-12: - -Q12. 
What does the message "Error happened while constructing caffe.Net in the Caffe fallback function" mean? -##################################################################################################################################################### - -**A:** Model Optimizer tried to infer a specified layer via the Caffe framework. However, it cannot construct a net using the Caffe Python interface. Make sure that your ``caffemodel`` and ``prototxt`` files are correct. To ensure that the problem is not in the ``prototxt`` file, see FAQ :ref:`#2 `. - -.. _question-13: - -Q13. What does the message "Cannot infer shapes due to exception in Caffe" mean? -##################################################################################################################################################### - -**A:** Model Optimizer tried to infer a custom layer via the Caffe framework, but the model could not be inferred using Caffe. This might happen if you try to convert the model with some noise weights and biases, which conflict with layers that have dynamic shapes. You should write your own extension for every custom layer your topology might have. For more details, refer to the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility>` page. - -.. _question-14: - -Q14. What does the message "Cannot infer shape for node {} because there is no Caffe available. Please register python infer function for op or use Caffe for shape inference" mean? -#################################################################################################################################################################################### - -**A:** Your model contains a custom layer and you have correctly registered it with the ``CustomLayersMapping.xml`` file. These steps are required to offload shape inference of the custom layer with the help of the system Caffe. However, Model Optimizer could not import a Caffe package. 
Make sure that you have built Caffe with a ``pycaffe`` target and added it to the ``PYTHONPATH`` environment variable. At the same time, it is highly recommended to avoid dependency on Caffe and write your own Model Optimizer extension for your custom layer. For more information, refer to FAQ :ref:`#44 `. - -.. _question-15: - -Q15. What does the message "Framework name can not be deduced from the given options. Use --framework to choose one of Caffe, TensorFlow, MXNet" mean? -###################################################################################################################################################### - -**A:** You have run Model Optimizer without a flag ``--framework caffe|tf``. Model Optimizer tries to deduce the framework by the extension of input model file (``.pb`` for TensorFlow, ``.caffemodel`` for Caffe, ``.params`` for Apache MXNet). Your input model might have a different extension and you need to explicitly set the source framework. For example, use ``--framework caffe``. - -.. _question-16: - -Q16. What does the message "Input shape is required to convert MXNet model. Please provide it with --input_shape" mean? -##################################################################################################################################################### - -**A:** Input shape was not provided. That is mandatory for converting an MXNet model to the OpenVINO Intermediate Representation, because MXNet models do not contain information about input shapes. Use the ``--input_shape`` flag to specify it. For more information about using the ``--input_shape``, refer to FAQ :ref:`#56 `. - -.. _question-17: - -.. _question-18: - -.. _question-19: - -Q19. What does the message "Both --scale and --scale_values are defined. Specify either scale factor or scale values per input channels" mean? 
-##################################################################################################################################################### - -**A:** The ``--scale`` option sets a scaling factor for all channels, while ``--scale_values`` sets a scaling factor per each channel. Using both of them simultaneously produces ambiguity, so you must use only one of them. For more information, refer to the **Using Framework-Agnostic Conversion Parameters** section: for :doc:`Converting a TensorFlow Model <[legacy]-supported-model-formats/[legacy]-convert-tensorflow>`. - -.. _question-20: - -Q20. What does the message "Cannot find prototxt file: for Caffe please specify --input_proto - a protobuf file that stores topology and --input_model that stores pre-trained weights" mean? -############################################################################################################################################################################################## - -**A:** Model Optimizer cannot find a ``.prototxt`` file for a specified model. By default, it must be located in the same directory as the input model with the same name (except extension). If any of these conditions is not satisfied, use ``--input_proto`` to specify the path to the ``.prototxt`` file. - -.. _question-21: - -.. _question-22: - -Q22. What does the message "Failed to create directory .. . Permission denied!" mean? -##################################################################################################################################################### - -**A:** Model Optimizer cannot create a directory specified via ``--output_dir``. Make sure that you have enough permissions to create the specified directory. - -.. _question-23: - -Q23. What does the message "Discovered data node without inputs and value" mean? 
-##################################################################################################################################################### - -**A:** One of the layers in the specified topology might not have inputs or values. Make sure that the provided ``caffemodel`` and ``protobuf`` files are correct. - -.. _question-24: - -Q24. What does the message "Part of the nodes was not translated to IE. Stopped" mean? -##################################################################################################################################################### - -**A:** Some of the operations are not supported by OpenVINO Runtime and cannot be translated to OpenVINO Intermediate Representation. You can extend Model Optimizer by allowing generation of new types of operations and implement these operations in the dedicated OpenVINO plugins. For more information, refer to the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-25: - -Q25. What does the message "While creating an edge from .. to .. : node name is undefined in the graph. Check correctness of the input model" mean? -##################################################################################################################################################### - -**A:** Model Optimizer cannot build a graph based on a specified model. Most likely, it is incorrect. - -.. _question-26: - -Q26. What does the message "Node does not exist in the graph" mean? -##################################################################################################################################################### - -**A:** You might have specified an output node via the ``--output`` flag that does not exist in a provided model. Make sure that the specified output is correct and this node exists in the current model. - -.. _question-27: - -Q27. What does the message "--input parameter was provided. Other inputs are needed for output computation. 
Provide more inputs or choose another place to cut the net" mean? -############################################################################################################################################################################## - -**A:** Most likely, Model Optimizer tried to cut the model by a specified input. However, other inputs are needed. - -.. _question-28: - -Q28. What does the message "Placeholder node does not have an input port, but input port was provided" mean? -##################################################################################################################################################### - -**A:** You might have specified a placeholder node with an input node, while the placeholder node does not have it in the model. - -.. _question-29: - -Q29. What does the message "Port index is out of number of available input ports for node" mean? -##################################################################################################################################################### - -**A:** This error occurs when an incorrect input port is specified with the ``--input`` command line argument. When using ``--input``, you may optionally specify an input port in the form: ``X:node_name``, where ``X`` is an integer index of the input port starting from 0 and ``node_name`` is the name of a node in the model. This error occurs when the specified input port ``X`` is not in the range 0..(n-1), where n is the number of input ports for the node. Specify a correct port index, or do not use it if it is not needed. - -.. _question-30: - -Q30. What does the message "Node has more than 1 input and input shapes were provided. Try not to provide input shapes or specify input port with PORT:NODE notation, where PORT is an integer" mean? 
-###################################################################################################################################################################################################### - -**A:** This error occurs when an incorrect combination of the ``--input`` and ``--input_shape`` command line options is used. Using both ``--input`` and ``--input_shape`` is valid only if ``--input`` points to the ``Placeholder`` node, a node with one input port or ``--input`` has the form ``PORT:NODE``, where ``PORT`` is an integer port index of input for node ``NODE``. Otherwise, the combination of ``--input`` and ``--input_shape`` is incorrect. - - -.. _question-31: - -Q31. What does the message "Input port > 0 in --input is not supported if --input_shape is not provided. Node: NAME_OF_THE_NODE. Omit port index and all input ports will be replaced by placeholders. Or provide --input_shape" mean? -####################################################################################################################################################################################################################################### - -**A:** When using the ``PORT:NODE`` notation for the ``--input`` command line argument and ``PORT`` > 0, you should specify ``--input_shape`` for this input. This is a limitation of the current Model Optimizer implementation. - -.. note:: It is no longer relevant message since the limitation on input port index for model truncation has been resolved. - -.. _question-32: - -Q32. What does the message "No or multiple placeholders in the model, but only one shape is provided, cannot set it" mean? -##################################################################################################################################################### - -**A:** You might have provided only one shape for the placeholder, while there are none or multiple inputs in the model. Make sure that you have provided the correct data for placeholder nodes. - -.. 
_question-33: - -Q33. What does the message "The amount of input nodes for port is not equal to 1" mean? -##################################################################################################################################################### - -**A:** This error occurs when the ``SubgraphMatch.single_input_node`` function is used for an input port that supplies more than one node in a sub-graph. The ``single_input_node`` function can be used only for ports that has a single consumer inside the matching sub-graph. When multiple nodes are connected to the port, use the ``input_nodes`` function or ``node_by_pattern`` function instead of ``single_input_node``. For more details, refer to the **Graph Transformation Extensions** section in the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions>` guide. - -.. _question-34: - -Q34. What does the message "Output node for port has already been specified" mean? -##################################################################################################################################################### - -**A:** This error occurs when the ``SubgraphMatch._add_output_node`` function is called manually from user's extension code. This is an internal function, and you should not call it directly. - -.. _question-35: - -Q35. What does the message "Unsupported match kind.... Match kinds "points" or "scope" are supported only" mean? -##################################################################################################################################################### - -**A:** While using configuration file to implement a TensorFlow front replacement extension, an incorrect match kind was used. Only ``points`` or ``scope`` match kinds are supported. For more details, refer to the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility>` guide. - -.. 
_question-36: - -Q36. What does the message "Cannot write an event file for the TensorBoard to directory" mean? -##################################################################################################################################################### - -**A:** Model Optimizer tried to write an event file in the specified directory but failed to do that. That could happen when the specified directory does not exist or you do not have permissions to write in it. - -.. _question-37: - -Q37. What does the message "There is no registered 'infer' function for node with op = .. . Please implement this function in the extensions" mean? -##################################################################################################################################################### - -**A** Most likely, you tried to extend Model Optimizer with a new primitive, but you did not specify an infer function. For more information on extensions, see the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-38: - -Q38. What does the message "Stopped shape/value propagation at node" mean? -##################################################################################################################################################### - -**A:** Model Optimizer cannot infer shapes or values for the specified node. It can happen because of the following reasons: a bug exists in the custom shape infer function, the node inputs have incorrect values/shapes, or the input shapes are incorrect. - -.. _question-39: - -Q39. What does the message "The input with shape .. does not have the batch dimension" mean? -##################################################################################################################################################### - -**A:** Batch dimension is the first dimension in the shape and it should be equal to 1 or undefined. 
In your case, it is not either equal to 1 or undefined, which is why the ``-b`` shortcut produces undefined and unspecified behavior. To resolve the issue, specify full shapes for each input with the ``--input_shape`` option. Run Model Optimizer with the ``--help`` option to learn more about the notation for input shapes. - -.. _question-40: - -Q40. What does the message "Not all output shapes were inferred or fully defined for node" mean? -##################################################################################################################################################### - -**A:** Most likely, the shape is not defined (partially or fully) for the specified node. You can use ``--input_shape`` with positive integers to override model input shapes. - -.. _question-41: - -Q41. What does the message "Shape for tensor is not defined. Can not proceed" mean? -##################################################################################################################################################### - -**A:** This error occurs when the ``--input`` command-line option is used to cut a model and ``--input_shape`` is not used to override shapes for a node, so a shape for the node cannot be inferred by Model Optimizer. You need to help Model Optimizer by specifying shapes with ``--input_shape`` for each node specified with the ``--input`` command-line option. - -.. _question-42: - -Q42. What does the message "Module TensorFlow was not found. Please install TensorFlow 1.2 or higher" mean? -##################################################################################################################################################### - -**A:** To convert TensorFlow models with Model Optimizer, TensorFlow 1.2 or newer must be installed. For more information on prerequisites, see the :doc:`Configuring Model Optimizer <../legacy-conversion-api>` guide. - -.. _question-43: - -Q43. 
What does the message "Cannot read the model file: it is incorrect TensorFlow model file or missing" mean? -##################################################################################################################################################### - -**A:** The model file should contain a frozen TensorFlow graph in the text or binary format. Make sure that ``--input_model_is_text`` is provided for a model in the text format. By default, a model is interpreted as binary file. - -.. _question-44: - -Q44. What does the message "Cannot pre-process TensorFlow graph after reading from model file. File is corrupt or has unsupported format" mean? -##################################################################################################################################################### - -**A:** Most likely, there is a problem with the specified file for the model. The file exists, but it has an invalid format or is corrupted. - -.. _question-45: - -Q45. What does the message "Found custom layer. Model Optimizer does not support this layer. Please, register it in CustomLayersMapping.xml or implement extension" mean? -########################################################################################################################################################################## - -**A:** This means that the layer ``{layer_name}`` is not supported in Model Optimizer. You will find a list of all unsupported layers in the corresponding section. You should implement the extensions for this layer. See :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` for more information. - -.. _question-46: - -Q46. What does the message "Custom replacement configuration file does not exist" mean? 
-##################################################################################################################################################### - -**A:** A path to the custom replacement configuration file was provided with the ``--transformations_config`` flag, but the file could not be found. Make sure the specified path is correct and the file exists. - -.. _question-47: - -Q47. What does the message "Extractors collection have case insensitive duplicates" mean? -##################################################################################################################################################### - -**A:** When extending Model Optimizer with new primitives, keep in mind that their names are case-insensitive. Most likely, another operation with the same name is already defined. For more information, see the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-48: - -Q48. What does the message "Input model name is not in an expected format, cannot extract iteration number" mean? -##################################################################################################################################################### - -**A:** Model Optimizer cannot load an MXNet model in the specified file format. Make sure you use the ``.json`` or ``.param`` format. - -.. _question-49: - -Q49. What does the message "Cannot convert type of placeholder because not all of its outputs are 'Cast' to float operations" mean? -##################################################################################################################################################### - -**A:** There are models where ``Placeholder`` has the UINT8 type and the first operation after it is 'Cast', which casts the input to FP32. Model Optimizer detected that the ``Placeholder`` has the UINT8 type, but the next operation is not 'Cast' to float. Model Optimizer does not support such a case. 
Make sure you change the model to have ``Placeholder`` for FP32. - -.. _question-50: - -Q50. What does the message "Data type is unsupported" mean? -##################################################################################################################################################### - -**A:** Model Optimizer cannot read the value with the specified data type. Currently, the following types are supported: bool, float16, float32, double, int8, int16, int32, int64, uint8, uint16, uint32, uint64, str. - -.. _question-51: - -Q51. What does the message "No node with name ..." mean? -##################################################################################################################################################### - -**A:** Model Optimizer tried to access a node that does not exist. This could happen if you have incorrectly specified placeholder, input or output node name. - -.. _question-52: - -Q52. What does the message "Module MXNet was not found. Please install MXNet 1.0.0" mean? -##################################################################################################################################################### - -**A:** To convert MXNet models with Model Optimizer, Apache MXNet 1.0.0 must be installed. For more information about prerequisites, see the :doc:`Configuring Model Optimizer <../legacy-conversion-api>` guide. - -.. _question-53: - -Q53. What does the message "The following error happened while loading MXNet model .." mean? -##################################################################################################################################################### - -**A:** Most likely, there is a problem with loading of the MXNet model. Make sure the specified path is correct, the model exists and is not corrupted, and you have sufficient permissions to work with it. - -.. _question-54: - -Q54. What does the message "The following error happened while processing input shapes: .." mean? 
-##################################################################################################################################################### - -**A:** Make sure inputs are defined and have correct shapes. You can use ``--input_shape`` with positive integers to override model input shapes. - -.. _question-55: - -Q55. What does the message "Attempt to register of custom name for the second time as class. Note that custom names are case-insensitive" mean? -##################################################################################################################################################### - -**A:** When extending Model Optimizer with new primitives, keep in mind that their names are case-insensitive. Most likely, another operation with the same name is already defined. For more information, see the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-56: - -Q56. What does the message "Both --input_shape and --batch were provided. Please, provide only one of them" mean? -##################################################################################################################################################### - -**A:** Specifying the batch and the input shapes at the same time is not supported. You must specify a desired batch as the first value of the input shape. - -.. _question-57: - -Q57. What does the message "Input shape .. cannot be parsed" mean? -##################################################################################################################################################### - -**A:** The specified input shape cannot be parsed. Define it in one of the following ways: - -* - - .. code-block:: shell - - mo --input_model .caffemodel --input_shape (1,3,227,227) - -* - - .. code-block:: shell - - mo --input_model .caffemodel --input_shape [1,3,227,227] - -* In case of multi input topology you should also specify inputs: - - .. 
code-block:: shell - - mo --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),(1,6,1,1) - - -Keep in mind that there is no space between and inside the brackets for input shapes. - -.. _question-58: - -Q58. What does the message "Please provide input layer names for input layer shapes" mean? -##################################################################################################################################################### - -**A:** When specifying input shapes for several layers, you must provide names for inputs, whose shapes will be overwritten. Additional information for ``--input_shape`` is in FAQ :ref:`#56 `. - -.. _question-59: - -Q59. What does the message "Values cannot be parsed" mean? -##################################################################################################################################################### - -**A:** Mean values for the given parameter cannot be parsed. It should be a string with a list of mean values. For example, in '(1,2,3)', 1 stands for the RED channel, 2 for the GREEN channel, 3 for the BLUE channel. - -.. _question-60: - -Q60. What does the message ".. channels are expected for given values" mean? -##################################################################################################################################################### - -**A:** The number of channels and the number of given values for mean values do not match. The shape should be defined as '(R,G,B)' or '[R,G,B]'. The shape should not contain undefined dimensions (? or -1). The order of values is as follows: (value for a RED channel, value for a GREEN channel, value for a BLUE channel). - -.. _question-61: - -Q61. What does the message "You should specify input for each mean value" mean? 
-##################################################################################################################################################### - -**A:** Most likely, you didn't specify inputs using ``--mean_values``. Specify inputs with the ``--input`` flag. For usage examples, refer to the FAQ :ref:`#62 `. - -.. _question-62: - -Q62. What does the message "You should specify input for each scale value" mean? -##################################################################################################################################################### - -**A:** Most likely, you didn't specify inputs using ``--scale_values``. Specify inputs with the ``--input`` flag. For usage examples, refer to the FAQ :ref:`#63 `. - -.. _question-63: - -Q63. What does the message "Number of inputs and mean values does not match" mean? -##################################################################################################################################################### - -**A:** The number of specified mean values and the number of inputs must be equal. - -.. _question-64: - -Q64. What does the message "Number of inputs and scale values does not match" mean? -##################################################################################################################################################### - -**A:** The number of specified scale values and the number of inputs must be equal. - -.. _question-65: - -Q65. What does the message "No class registered for match kind ... Supported match kinds are .. " mean? -##################################################################################################################################################### - -**A:** A replacement defined in the configuration file for sub-graph replacement, using node names patterns or start/end nodes, has the ``match_kind`` attribute. The attribute may have only one of the values: ``scope`` or ``points``. 
If a different value is provided, this error is displayed. - -.. _question-66: - -Q66. What does the message "No instance(s) is(are) defined for the custom replacement" mean? -##################################################################################################################################################### - -**A:** A replacement defined in the configuration file for sub-graph replacement, using node names patterns or start/end nodes, has the ``instances`` attribute. This attribute is mandatory. This error will occur if the attribute is missing. For more details, refer to the **Graph Transformation Extensions** section in the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility>` guide. - -.. _question-67: - -Q67. What does the message "The instance must be a single dictionary for the custom replacement with id .." mean? -##################################################################################################################################################### - -**A:** A replacement defined in the configuration file for sub-graph replacement, using start/end nodes, has the ``instances`` attribute. For this type of replacement, the instance must be defined with a dictionary with two keys ``start_points`` and ``end_points``. Values for these keys are lists with the start and end node names, respectively. For more details, refer to the **Graph Transformation Extensions** section in the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions>` guide. - -.. _question-68: - -Q68. What does the message "No instances are defined for replacement with id .. " mean? -##################################################################################################################################################### - -**A:** A replacement for the specified id is not defined in the configuration file. 
For more information, refer to the FAQ :ref:`#65 `. - -.. _question-69: - -Q69. What does the message "Custom replacements configuration file .. does not exist" mean? -##################################################################################################################################################### - -**A:** The path to a custom replacement configuration file was provided with the ``--transformations_config`` flag, but it cannot be found. Make sure the specified path is correct and the file exists. - -.. _question-70: - -Q70. What does the message "Failed to parse custom replacements configuration file .." mean? -##################################################################################################################################################### - -**A:** The file for custom replacement configuration provided with the ``--transformations_config`` flag cannot be parsed. In particular, it should have a valid JSON structure. For more details, refer to the `JSON Schema Reference `__ page. - -.. _question-71: - -Q71. What does the message "One of the custom replacements in the configuration file .. does not contain attribute 'id'" mean? -##################################################################################################################################################### - -**A:** Every custom replacement should declare a set of mandatory attributes and their values. For more details, refer to FAQ :ref:`#71 `. - -.. _question-72: - -Q72. What does the message "File .. validation failed" mean? -##################################################################################################################################################### - -**A:** The file for custom replacement configuration provided with the ``--transformations_config`` flag cannot pass validation. Make sure you have specified ``id``, ``instances``, and ``match_kind`` for all the patterns. - -.. _question-73: - -Q73. 
What does the message "Cannot update the file .. because it is broken" mean? -##################################################################################################################################################### - -**A:** The custom replacement configuration file provided with the ``--tensorflow_custom_operations_config_update`` cannot be parsed. Make sure that the file is correct and refer to FAQ :ref:`#68 `, :ref:`#69 `, :ref:`#70 `, and :ref:`#71 `. - -.. _question-74: - -Q74. What does the message "End node .. is not reachable from start nodes: .." mean? -##################################################################################################################################################### - -**A:** This error occurs when you try to make a sub-graph match. It is detected that between the start and end nodes that were specified as inputs/outputs for the subgraph to find, there are nodes marked as outputs but there is no path from them to the input nodes. Make sure the subgraph you want to match does actually contain all the specified output nodes. - -.. _question-75: - -Q75. What does the message "Sub-graph contains network input node .." mean? -##################################################################################################################################################### - -**A:** The start or end node for the sub-graph replacement using start/end nodes is specified incorrectly. Model Optimizer finds internal nodes of the sub-graph strictly "between" the start and end nodes, and then adds all input nodes to the sub-graph (and the inputs of their inputs, etc.) for these "internal" nodes. This error reports that Model Optimizer reached input node during this phase. This means that the start/end points are specified incorrectly in the configuration file. 
For more details, refer to the **Graph Transformation Extensions** section in the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions>` guide. - -.. _question-76: - -Q76. What does the message "... elements of ... were clipped to infinity while converting a blob for node [...] to ..." mean? -##################################################################################################################################################### - -**A:** This message may appear when the ``--compress_to_fp16`` command-line option is used. This option implies compression of all the model weights, biases, and other constant values to FP16. If a value of a constant is out of the range of valid FP16 values, the value is converted to positive or negative infinity. It may lead to incorrect results of inference or may not be a problem, depending on the model. The number of such elements and the total number of elements in the constant value is printed out together with the name of the node, where this value is used. - -.. _question-77: - -Q77. What does the message "... elements of ... were clipped to zero while converting a blob for node [...] to ..." mean? -##################################################################################################################################################### - -**A:** This message may appear when the ``--compress_to_fp16`` command-line option is used. This option implies conversion of all blobs in the mode to FP16. If a value in the blob is so close to zero that it cannot be represented as a valid FP16 value, it is converted to a true zero FP16 value. Depending on the model, it may lead to incorrect results of inference or may not be a problem. The number of such elements and the total number of elements in the blob are printed out together with a name of the node, where this blob is used. - -.. _question-78: - -Q78. 
What does the message "The amount of nodes matched pattern ... is not equal to 1" mean? -##################################################################################################################################################### - -**A:** This error occurs when the ``SubgraphMatch.node_by_pattern`` function is used with a pattern that does not uniquely identify a single node in a sub-graph. Try to extend the pattern string to make unambiguous match to a single sub-graph node. For more details, refer to the **Graph Transformation Extensions** section in the :doc:`[Legacy] Model Optimizer Extensibility <../legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions>` guide. - -.. _question-79: - -Q79. What does the message "The topology contains no "input" layers" mean? -##################################################################################################################################################### - -**A:** Your Caffe topology ``.prototxt`` file is intended for training. Model Optimizer expects a deployment-ready ``.prototxt`` file. To fix the problem, prepare a deployment-ready ``.prototxt`` file. Preparation of a deploy-ready topology usually results in removing ``data`` layer(s), adding ``input`` layer(s), and removing loss layer(s). - -.. _question-80: - -Q80. What does the message "Warning: please expect that Model Optimizer conversion might be slow" mean? -##################################################################################################################################################### - -**A:** You are using an unsupported Python version. Use only versions 3.4 - 3.6 for the C++ ``protobuf`` implementation that is supplied with OpenVINO toolkit. You can still boost the conversion speed by building the protobuf library from sources. 
For complete instructions about building ``protobuf`` from sources, see the appropriate section in the :doc:`Converting a Model to Intermediate Representation <../legacy-conversion-api>` guide. - -.. _question-81: - -Q81. What does the message "Arguments --nd_prefix_name, --pretrained_model_name and --input_symbol should be provided. Please provide all or do not use any." mean? -#################################################################################################################################################################### - -**A:** This error occurs if you did not provide the ``--nd_prefix_name``, ``--pretrained_model_name``, and ``--input_symbol`` parameters. -Model Optimizer requires both ``.params`` and ``.nd`` model files to merge into the result file (``.params``). -Topology description (``.json`` file) should be prepared (merged) in advance and provided with the ``--input_symbol`` parameter. - -If you add additional layers and weights that are in ``.nd`` files to your model, Model Optimizer can build a model -from one ``.params`` file and two additional ``.nd`` files (``*_args.nd``, ``*_auxs.nd``). -To do that, provide both CLI options or do not pass them if you want to convert an MXNet model without additional weights. - -.. _question-82: - -Q82. What does the message "You should specify input for mean/scale values" mean? -##################################################################################################################################################### - -**A:** When the model has multiple inputs and you want to provide mean/scale values, you need to pass those values for each input. More specifically, the number of passed values should be the same as the number of inputs of the model. -For more information, refer to the :doc:`Converting a Model to Intermediate Representation <[legacy]-setting-input-shapes>` guide. - -.. _question-83: - -Q83. What does the message "Input with name ... not found!" mean? 
-##################################################################################################################################################### - -**A:** When you passed the mean/scale values and specify names of input layers of the model, you might have used the name that does not correspond to any input layer. Make sure that you list only names of the input layers of your model when passing values with the ``--input`` option. -For more information, refer to the :doc:`Converting a Model to Intermediate Representation <[legacy]-setting-input-shapes>` guide. - -.. _question-84: - -Q84. What does the message "Specified input json ... does not exist" mean? -##################################################################################################################################################### - -**A:** Most likely, ``.json`` file does not exist or has a name that does not match the notation of Apache MXNet. Make sure the file exists and has a correct name. - -.. _question-85: - -Q85. What does the message "Unsupported Input model file type ... Model Optimizer support only .params and .nd files format" mean? -##################################################################################################################################################### - -**A:** Model Optimizer for Apache MXNet supports only ``.params`` and ``.nd`` files formats. Most likely, you specified an unsupported file format in ``--input_model``. - -.. _question-86: - -Q86. What does the message "Operation ... not supported. Please register it as custom op" mean? -##################################################################################################################################################### - -**A:** Model Optimizer tried to load the model that contains some unsupported operations. -If you want to convert model that contains unsupported operations, you need to prepare extension for all such operations. 
-For more information, refer to the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-87: - -Q87. What does the message "Can not register Op ... Please, call function 'register_caffe_python_extractor' with parameter 'name'" mean? -##################################################################################################################################################### - -**A:** This error appears if the class of implementation of ``Op`` for Python Caffe layer could not be used by Model Optimizer. Python layers should be handled differently comparing to ordinary Caffe layers. - -In particular, you need to call the function ``register_caffe_python_extractor`` and pass ``name`` as the second argument of the function. -The name should be the compilation of the layer name with the module name separated by a dot. - -For example, your topology contains this layer with type ``Python``: - -.. code-block:: py - :force: - - layer { - name: 'proposal' - type: 'Python' - ... - python_param { - module: 'rpn.proposal_layer' - layer: 'ProposalLayer' - param_str: "'feat_stride': 16" - } - } - - -The first step is to implement an extension for this layer in Model Optimizer as an ancestor of ``Op`` class: - -.. code-block:: py - :force: - - class ProposalPythonExampleOp(Op): - op = 'Proposal' - - def __init__(self, graph: nx.MultiDiGraph, attrs: dict): - ... - - -It is mandatory to call two functions right after the implementation of that class: - -.. code-block:: py - :force: - - class ProposalPythonExampleOp(Op): - ... 
- - register_caffe_python_extractor(ProposalPythonExampleOp, 'rpn.proposal_layer.ProposalLayer') - Op.excluded_classes.append(ProposalPythonExampleOp) - - -Note that the first call ``register_caffe_python_extractor(ProposalPythonExampleOp, 'rpn.proposal_layer.ProposalLayer')`` registers an extension of the layer in Model Optimizer, which will be found by the specific name (mandatory to join module name and layer name): ``rpn.proposal_layer.ProposalLayer``. - -The second call prevents Model Optimizer from using this extension as if it is an extension for -a layer with type ``Proposal``. Otherwise, this layer can be chosen as an implementation of extension that can lead to potential issues. -For more information, refer to the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-88: - -Q88. What does the message "Model Optimizer is unable to calculate output shape of Memory node .." mean? -##################################################################################################################################################### - -**A:** Model Optimizer supports only ``Memory`` layers, in which ``input_memory`` goes before ``ScaleShift`` or the ``FullyConnected`` layer. -This error message means that in your model the layer after input memory is not of the ``ScaleShift`` or ``FullyConnected`` type. -This is a known limitation. - -.. _question-89: - -Q89. What do the messages "File ... does not appear to be a Kaldi file (magic number does not match)", "Kaldi model should start with <Nnet> tag" mean? -######################################################################################################################################################### - -**A:** These error messages mean that Model Optimizer does not support your Kaldi model, because the ``checksum`` of the model is not -16896 (the model should start with this number), or the model file does not contain the ``<Nnet>`` tag as a starting one.
-Make sure that you provide a path to a true Kaldi model and try again. - -.. _question-90: - -Q90. What do the messages "Expect counts file to be one-line file." or "Expect counts file to contain list of integers" mean? -##################################################################################################################################################### - -**A:** These messages mean that the counts file you passed does not consist of exactly one line. The counts file should start with -``[`` and end with ``]``, and integer values should be separated by spaces between those brackets. - -.. _question-91: - -Q91. What does the message "Model Optimizer is not able to read Kaldi model .." mean? -##################################################################################################################################################### - -**A:** There are multiple reasons why Model Optimizer does not accept a Kaldi topology, including: -the file is not available or does not exist. Refer to FAQ :ref:`#88 `. - -.. _question-92: - -Q92. What does the message "Model Optimizer is not able to read counts file .." mean? -##################################################################################################################################################### - -**A:** There are multiple reasons why Model Optimizer does not accept a counts file, including: -the file is not available or does not exist. Refer to FAQ :ref:`#89 `. - -.. _question-93: - -Q93. What does the message "For legacy MXNet models Model Optimizer does not support conversion of old MXNet models (trained with 1.0.0 version of MXNet and lower) with custom layers." mean?
-############################################################################################################################################################################################### - -**A:** This message means that if you have a model with custom layers and its JSON file has been generated with Apache MXNet version -lower than 1.0.0, Model Optimizer does not support such topologies. If you want to convert it, you have to rebuild -MXNet with unsupported layers or generate a new JSON file with Apache MXNet version 1.0.0 or higher. You also need to implement -OpenVINO extension to use custom layers. -For more information, refer to the :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` guide. - -.. _question-94: - -Q94. What does the message "Expected token ``</ParallelComponent>``, has ``...``" mean? -##################################################################################################################################################### - -**A:** This error message means that Model Optimizer does not support your Kaldi model, because the Net contains ``ParallelComponent`` that does not end with the ``</ParallelComponent>`` tag. -Make sure that you provide a path to a true Kaldi model and try again. - -.. _question-95: - -.. _question-96: - -.. _question-97: - -Q97. What does the message "Graph contains a cycle. Can not proceed .." mean? -##################################################################################################################################################### - -**A:** Model Optimizer supports only straightforward models without cycles. - -There are multiple ways to avoid cycles: - -For Tensorflow: - -* :doc:`Convert models, created with TensorFlow Object Detection API <[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection>` - -For all frameworks: - -1. :doc:`Replace cycle containing Sub-graph in Model Optimizer [Legacy Solution] <../legacy-model-optimizer-extensibility>` -2.
See :doc:`OpenVINO Extensibility Mechanism <../../../openvino-extensibility>` - -or - -* Edit the model in its original framework to exclude cycle. - -.. _question-98: - -.. _question-99: - -.. _question-100: - -Q100. What does the message "Interp layer shape inference function may be wrong, please, try to update layer shape inference function in the file (extensions/ops/interp.op at the line ...)." mean? -#################################################################################################################################################################################################### - -**A:** There are many flavors of Caffe framework, and most layers in them are implemented identically. -However, there are exceptions. For example, the output value of layer Interp is calculated differently in Deeplab-Caffe and classic Caffe. Therefore, if your model contains layer Interp and the conversion of your model has failed, modify the ``interp_infer`` function in the ``extensions/ops/interp.op`` file according to the comments in the file. - -.. _question-101: - -Q101. What does the message "Mean/scale values should ..." mean? -##################################################################################################################################################### - -**A:** It means that your mean/scale values have a wrong format. Specify mean/scale values in the form of ``layer_name(val1,val2,val3)``. -You need to specify values for each input of the model. For more information, refer to the :doc:`Converting a Model to Intermediate Representation <[legacy]-setting-input-shapes>` guide. - -.. _question-102: - -Q102. What does the message "Operation _contrib_box_nms is not supported ..." mean? 
-##################################################################################################################################################### - -**A:** It means that you are trying to convert a topology contains the ``_contrib_box_nms`` operation which is not supported directly. However, the sub-graph of operations including ``_contrib_box_nms`` could be replaced with the DetectionOutput layer if your topology is one of the ``gluoncv`` topologies. Specify the ``--enable_ssd_gluoncv`` command-line parameter for Model Optimizer to enable this transformation. - -.. _question-103: - -Q103. What does the message "ModelOptimizer is not able to parse "\*.caffemodel" mean? -##################################################################################################################################################### - -**A:** If a ``*.caffemodel`` file exists and is correct, the error occurred possibly because of the use of Python protobuf implementation. In some cases, error messages may appear during model parsing, for example: "``utf-8`` codec can't decode byte 0xe0 in position 4: invalid continuation byte in field: mo_caffe.SpatialTransformerParameter.transform_type". You can either use a newer Python version (3.8 - 3.11) or build the ``cpp`` implementation of ``protobuf`` yourself for your version of Python. For the complete instructions about building ``protobuf`` from sources, see the appropriate section in the :doc:`Converting Models with Model Optimizer <../legacy-conversion-api>` guide. - -.. _question-104: - -.. _question-105: - -Q105. What does the message "The IR preparation was executed by the legacy MO path. ..." mean? -##################################################################################################################################################### - -**A:** For the models in ONNX format, there are two available paths of IR conversion. 
-The old one is handled by the old Python implementation, while the new one uses new C++ frontends. -Starting from the 2022.1 version, the default IR conversion path for ONNX models is processed using the new ONNX frontend. -Certain features, such as ``--extensions`` and ``--transformations_config``, are not yet fully supported on the new frontends. -The new frontends support only paths to shared libraries (.dll and .so) for ``--extensions``. They support JSON configurations with defined library fields for ``--transformations_config``. -Inputs freezing (enabled by ``--freeze_placeholder_with_value`` or ``--input`` arguments) is not supported by the new frontends. -The IR conversion falls back to the old path if a user does not select any expected path of conversion explicitly (with ``--use_new_frontend`` or ``--use_legacy_frontend`` MO arguments) and unsupported pre-defined scenario is detected on the new frontend path. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes.rst deleted file mode 100644 index 9e445742278568..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes.rst +++ /dev/null @@ -1,156 +0,0 @@ -[LEGACY] Setting Input Shapes -==================================== - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Setting Input Shapes <../../../../openvino-workflow/model-preparation/setting-input-shapes>` article. - -With model conversion API you can increase your model's efficiency by providing an additional shape definition, with these two parameters: `input_shape` and `static_shape`. - - -.. meta:: - :description: Learn how to increase the efficiency of a model with MO by providing an additional shape definition with the input_shape and static_shape parameters. - - -Specifying input_shape parameter -################################ - -``convert_model()`` supports conversion of models with dynamic input shapes that contain undefined dimensions. -However, if the shape of data is not going to change from one inference request to another, -it is recommended to set up static shapes (when all dimensions are fully defined) for the inputs. -Doing it at this stage, instead of during inference in runtime, can be beneficial in terms of performance and memory consumption. -To set up static shapes, model conversion API provides the ``input_shape`` parameter. -For more information on input shapes under runtime, refer to the :doc:`Changing input shapes <../../../../openvino-workflow/running-inference/changing-input-shape>` guide. -To learn more about dynamic shapes in runtime, refer to the :doc:`Dynamic Shapes <../../../../openvino-workflow/running-inference/dynamic-shapes>` guide. - -The OpenVINO Runtime API may present certain limitations in inferring models with undefined dimensions on some hardware. -In this case, the ``input_shape`` parameter and the :doc:`reshape method <../../../../openvino-workflow/running-inference/changing-input-shape>` can help to resolve undefined dimensions. - -For example, run model conversion for the TensorFlow MobileNet model with the single input -and specify the input shape of ``[2,300,300,3]``: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("MobileNet.pb", input_shape=[2,300,300,3]) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model MobileNet.pb --input_shape [2,300,300,3] - - -If a model has multiple inputs, ``input_shape`` must be used in conjunction with ``input`` parameter. -The ``input`` parameter contains a list of input names, for which shapes in the same order are defined via ``input_shape``. -For example, launch model conversion for the ONNX OCR model with a pair of inputs ``data`` and ``seq_len`` -and specify shapes ``[3,150,200,1]`` and ``[3]`` for them: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("ocr.onnx", input=["data","seq_len"], input_shape=[[3,150,200,1],[3]]) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model ocr.onnx --input data,seq_len --input_shape [3,150,200,1],[3] - - -Alternatively, specify input shapes, using the ``input`` parameter as follows: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("ocr.onnx", input=[("data",[3,150,200,1]),("seq_len",[3])]) - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model ocr.onnx --input data[3,150,200,1],seq_len[3] - - -The ``input_shape`` parameter allows overriding original input shapes to ones compatible with a given model. -Dynamic shapes, i.e. with dynamic dimensions, can be replaced in the original model with static shapes for the converted model, and vice versa. -The dynamic dimension can be marked in model conversion API parameter as ``-1`` or ``?``. -For example, launch model conversion for the ONNX OCR model and specify dynamic batch dimension for inputs: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. 
code-block:: py - :force: - - from openvino.tools.mo import convert_model - ov_model = convert_model("ocr.onnx", input=["data","seq_len"], input_shape=[[-1,150,200,1],[-1]] - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model ocr.onnx --input data,seq_len --input_shape [-1,150,200,1],[-1] - - -To optimize memory consumption for models with undefined dimensions in run-time, model conversion API provides the capability to define boundaries of dimensions. -The boundaries of undefined dimension can be specified with ellipsis. -For example, launch model conversion for the ONNX OCR model and specify a boundary for the batch dimension: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. code-block:: py - :force: - - from openvino.tools.mo import convert_model - from openvino.runtime import Dimension - ov_model = convert_model("ocr.onnx", input=["data","seq_len"], input_shape=[[Dimension(1,3),150,200,1],[Dimension(1,3)]] - - .. tab-item:: CLI - :sync: cli - - .. code-block:: sh - - mo --input_model ocr.onnx --input data,seq_len --input_shape [1..3,150,200,1],[1..3] - - -Practically, some models are not ready for input shapes change. -In this case, a new input shape cannot be set via model conversion API. -For more information about shape follow the :doc:`inference troubleshooting <[legacy]-troubleshooting-reshape-errors>` -and :ref:`ways to relax shape inference flow ` guides. 
- -Additional Resources -#################### - -* :doc:`Convert a Model <../legacy-conversion-api>` -* :doc:`Cutting Off Parts of a Model <[legacy]-cutting-parts-of-a-model>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats.rst deleted file mode 100644 index fb9f41c755d4fb..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats.rst +++ /dev/null @@ -1,598 +0,0 @@ -[LEGACY] Supported Model Formats -===================================== - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Supported Model Formats <../../../../openvino-workflow/model-preparation>` article. - -.. toctree:: - :maxdepth: 1 - :hidden: - - Converting a TensorFlow Model <[legacy]-supported-model-formats/[legacy]-convert-tensorflow> - Converting an ONNX Model <[legacy]-supported-model-formats/[legacy]-convert-onnx> - Converting a PyTorch Model <[legacy]-supported-model-formats/[legacy]-convert-pytorch> - Converting a TensorFlow Lite Model <[legacy]-supported-model-formats/[legacy]-convert-tensorflow-lite> - Converting a PaddlePaddle Model <[legacy]-supported-model-formats/[legacy]-convert-paddle> - Model Conversion Tutorials <[legacy]-supported-model-formats/[legacy]-conversion-tutorials> - -.. meta:: - :description: Learn about supported model formats and the methods used to convert, read, and compile them in OpenVINO™. 
- - -**OpenVINO IR (Intermediate Representation)** - the proprietary and default format of OpenVINO, benefiting from the full extent of its features. All other supported model formats, as listed below, are converted to :doc:`OpenVINO IR <../../../openvino-ir-format>` to enable inference. Consider storing your model in this format to minimize first-inference latency, perform model optimization, and, in some cases, save space on your drive. - -**PyTorch, TensorFlow, ONNX, and PaddlePaddle** - can be used with OpenVINO Runtime API directly, -which means you do not need to save them as OpenVINO IR before including them in your application. -OpenVINO can read, compile, and convert them automatically, as part of its pipeline. - -In the Python API, these options are provided as three separate methods: -``read_model()``, ``compile_model()``, and ``convert_model()``. -The ``convert_model()`` method enables you to perform additional adjustments -to the model, such as setting shapes, changing model input types or layouts, -cutting parts of the model, freezing inputs, etc. For a detailed description -of the conversion process, see the -:doc:`model conversion guide <../legacy-conversion-api>`. - -Here are code examples of how to use these methods with different model formats: - -.. tab-set:: - - .. tab-item:: PyTorch - :sync: torch - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - * The ``convert_model()`` method: - - This is the only method applicable to PyTorch models. - - .. dropdown:: List of supported formats: - - * **Python objects**: - - * ``torch.nn.Module`` - * ``torch.jit.ScriptModule`` - * ``torch.jit.ScriptFunction`` - - .. 
code-block:: py - :force: - - import openvino - import torchvision - from openvino.tools.mo import convert_model - core = openvino.Core() - - model = torchvision.models.resnet50(weights='DEFAULT') - ov_model = convert_model(model) - compiled_model = core.compile_model(ov_model, "AUTO") - - For more details on conversion, refer to the - :doc:`guide <[legacy]-supported-model-formats/[legacy]-convert-pytorch>` - and an example `tutorial `__ - on this topic. - - .. tab-item:: TensorFlow - :sync: tf - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - * The ``convert_model()`` method: - - When you use the ``convert_model()`` method, you have more control and you can specify additional adjustments for ``ov.Model``. The ``read_model()`` and ``compile_model()`` methods are easier to use, however, they do not have such capabilities. With ``ov.Model`` you can choose to optimize, compile and run inference on it or serialize it into a file for subsequent use. - - .. dropdown:: List of supported formats: - - * **Files**: - - * SavedModel - ```` or ``.pb`` - * Checkpoint - ``.pb`` or ``.pbtxt`` - * MetaGraph - ``.meta`` - - * **Python objects**: - - * ``tf.keras.Model`` - * ``tf.keras.layers.Layer`` - * ``tf.Module`` - * ``tf.compat.v1.Graph`` - * ``tf.compat.v1.GraphDef`` - * ``tf.function`` - * ``tf.compat.v1.session`` - * ``tf.train.checkpoint`` - - .. code-block:: py - :force: - - import openvino - from openvino.tools.mo import convert_model - - core = openvino.Core() - ov_model = convert_model("saved_model.pb") - compiled_model = core.compile_model(ov_model, "AUTO") - - For more details on conversion, refer to the - :doc:`guide <[legacy]-supported-model-formats/[legacy]-convert-tensorflow>` - and an example `tutorial `__ - on this topic. - - * The ``read_model()`` and ``compile_model()`` methods: - - .. dropdown:: List of supported formats: - - * **Files**: - - * SavedModel - ```` or ``.pb`` - * Checkpoint - ``.pb`` or ``.pbtxt`` - * MetaGraph - ``.meta`` - - .. 
code-block:: py - :force: - - ov_model = read_model("saved_model.pb") - compiled_model = core.compile_model(ov_model, "AUTO") - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: C++ - :sync: cpp - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * SavedModel - ```` or ``.pb`` - * Checkpoint - ``.pb`` or ``.pbtxt`` - * MetaGraph - ``.meta`` - - .. code-block:: cpp - - ov::CompiledModel compiled_model = core.compile_model("saved_model.pb", "AUTO"); - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: C - :sync: c - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * SavedModel - ```` or ``.pb`` - * Checkpoint - ``.pb`` or ``.pbtxt`` - * MetaGraph - ``.meta`` - - .. code-block:: c - - ov_compiled_model_t* compiled_model = NULL; - ov_core_compile_model_from_file(core, "saved_model.pb", "AUTO", 0, &compiled_model); - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: CLI - :sync: cli - - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. - - .. code-block:: sh - - mo --input_model .pb - - For details on the conversion, refer to the - :doc:`article <[legacy]-supported-model-formats/[legacy]-convert-tensorflow>`. - - .. tab-item:: TensorFlow Lite - :sync: tflite - - .. tab-set:: - - .. 
tab-item:: Python - :sync: py - - * The ``convert_model()`` method: - - When you use the ``convert_model()`` method, you have more control and you can specify additional adjustments for ``ov.Model``. The ``read_model()`` and ``compile_model()`` methods are easier to use, however, they do not have such capabilities. With ``ov.Model`` you can choose to optimize, compile and run inference on it or serialize it into a file for subsequent use. - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.tflite`` - - .. code-block:: py - :force: - - import openvino - from openvino.tools.mo import convert_model - - core = openvino.Core() - ov_model = convert_model(".tflite") - compiled_model = core.compile_model(ov_model, "AUTO") - - For more details on conversion, refer to the - :doc:`guide <[legacy]-supported-model-formats/[legacy]-convert-tensorflow>` - and an example `tutorial `__ - on this topic. - - - * The ``read_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.tflite`` - - .. code-block:: py - :force: - - import openvino - - core = openvino.Core() - ov_model = core.read_model(".tflite") - compiled_model = core.compile_model(ov_model, "AUTO") - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.tflite`` - - .. code-block:: py - :force: - - import openvino - - core = openvino.Core() - compiled_model = core.compile_model(".tflite", "AUTO") - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - - .. tab-item:: C++ - :sync: cpp - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.tflite`` - - .. 
code-block:: cpp - - ov::CompiledModel compiled_model = core.compile_model(".tflite", "AUTO"); - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: C - :sync: c - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.tflite`` - - .. code-block:: c - - ov_compiled_model_t* compiled_model = NULL; - ov_core_compile_model_from_file(core, ".tflite", "AUTO", 0, &compiled_model); - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: CLI - :sync: cli - - * The ``convert_model()`` method: - - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.tflite`` - - .. code-block:: sh - - mo --input_model .tflite - - For details on the conversion, refer to the - :doc:`article <[legacy]-supported-model-formats/[legacy]-convert-tensorflow-lite>`. - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - * The ``convert_model()`` method: - - When you use the ``convert_model()`` method, you have more control and you can specify additional adjustments for ``ov.Model``. The ``read_model()`` and ``compile_model()`` methods are easier to use, however, they do not have such capabilities. With ``ov.Model`` you can choose to optimize, compile and run inference on it or serialize it into a file for subsequent use. - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.onnx`` - - .. 
code-block:: py - :force: - - import openvino - from openvino.tools.mo import convert_model - - core = openvino.Core() - ov_model = convert_model(".onnx") - compiled_model = core.compile_model(ov_model, "AUTO") - - For more details on conversion, refer to the - :doc:`guide <[legacy]-supported-model-formats/[legacy]-convert-onnx>` - and an example `tutorial `__ - on this topic. - - - * The ``read_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.onnx`` - - .. code-block:: py - :force: - - import openvino - core = openvino.Core() - - ov_model = core.read_model(".onnx") - compiled_model = core.compile_model(ov_model, "AUTO") - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.onnx`` - - .. code-block:: py - :force: - - import openvino - core = openvino.Core() - - compiled_model = core.compile_model(".onnx", "AUTO") - - For a guide on how to run inference, see how to :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - - .. tab-item:: C++ - :sync: cpp - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.onnx`` - - .. code-block:: cpp - - ov::CompiledModel compiled_model = core.compile_model(".onnx", "AUTO"); - - For a guide on how to run inference, see how to :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: C - :sync: c - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.onnx`` - - .. code-block:: c - - ov_compiled_model_t* compiled_model = NULL; - ov_core_compile_model_from_file(core, ".onnx", "AUTO", 0, &compiled_model); - - For details on the conversion, refer to the :doc:`article <[legacy]-supported-model-formats/[legacy]-convert-onnx>` - - .. 
tab-item:: CLI - :sync: cli - - * The ``convert_model()`` method: - - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.onnx`` - - .. code-block:: sh - - mo --input_model .onnx - - For details on the conversion, refer to the - :doc:`article <[legacy]-supported-model-formats/[legacy]-convert-onnx>` - - .. tab-item:: PaddlePaddle - :sync: pdpd - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - * The ``convert_model()`` method: - - When you use the ``convert_model()`` method, you have more control and you can specify additional adjustments for ``ov.Model``. The ``read_model()`` and ``compile_model()`` methods are easier to use, however, they do not have such capabilities. With ``ov.Model`` you can choose to optimize, compile and run inference on it or serialize it into a file for subsequent use. - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.pdmodel`` - - * **Python objects**: - - * ``paddle.hapi.model.Model`` - * ``paddle.fluid.dygraph.layers.Layer`` - * ``paddle.fluid.executor.Executor`` - - .. code-block:: py - :force: - - import openvino - from openvino.tools.mo import convert_model - - core = openvino.Core() - ov_model = convert_model(".pdmodel") - compiled_model = core.compile_model(ov_model, "AUTO") - - For more details on conversion, refer to the - :doc:`guide <[legacy]-supported-model-formats/[legacy]-convert-paddle>` - and an example `tutorial `__ - on this topic. - - * The ``read_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.pdmodel`` - - .. code-block:: py - :force: - - import openvino - core = openvino.Core() - - ov_model = read_model(".pdmodel") - compiled_model = core.compile_model(ov_model, "AUTO") - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.pdmodel`` - - .. 
code-block:: py - :force: - - import openvino - core = openvino.Core() - - compiled_model = core.compile_model(".pdmodel", "AUTO") - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: C++ - :sync: cpp - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.pdmodel`` - - .. code-block:: cpp - - ov::CompiledModel compiled_model = core.compile_model(".pdmodel", "AUTO"); - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: C - :sync: c - - * The ``compile_model()`` method: - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.pdmodel`` - - .. code-block:: c - - ov_compiled_model_t* compiled_model = NULL; - ov_core_compile_model_from_file(core, ".pdmodel", "AUTO", 0, &compiled_model); - - For a guide on how to run inference, see how to - :doc:`Integrate OpenVINO™ with Your Application <../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>`. - - .. tab-item:: CLI - :sync: cli - - * The ``convert_model()`` method: - - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. - - .. dropdown:: List of supported formats: - - * **Files**: - - * ``.pdmodel`` - - .. code-block:: sh - - mo --input_model .pdmodel - - For details on the conversion, refer to the - :doc:`article <[legacy]-supported-model-formats/[legacy]-convert-paddle>`. - - -As OpenVINO support for **MXNet, Caffe, and Kaldi formats** has been **discontinued**, converting these legacy formats -to OpenVINO IR or ONNX before running inference should be considered the default path for use with OpenVINO. - -.. 
note:: - - If you want to keep working with the legacy formats the old way, refer to a previous - `OpenVINO LTS version and its documentation `__ . - - OpenVINO versions of 2023 are mostly compatible with the old instructions, - through a deprecated MO tool, installed with the deprecated OpenVINO Developer Tools package. - - `OpenVINO 2023.0 `__ is the last - release officially supporting the MO conversion process for the legacy formats. - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials.rst deleted file mode 100644 index 5fbe486a20960a..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials.rst +++ /dev/null @@ -1,59 +0,0 @@ -[LEGACY] Model Conversion Tutorials -==================================================== - - -.. 
toctree:: - :maxdepth: 1 - :hidden: - - [legacy]-conversion-tutorials/convert-tensorflow-attention-ocr - [legacy]-conversion-tutorials/convert-tensorflow-bert - [legacy]-conversion-tutorials/convert-tensorflow-crnn - [legacy]-conversion-tutorials/convert-tensorflow-deep-speech - [legacy]-conversion-tutorials/convert-tensorflow-efficient-det - [legacy]-conversion-tutorials/convert-tensorflow-face-net - [legacy]-conversion-tutorials/convert-tensorflow-gnmt - [legacy]-conversion-tutorials/convert-tensorflow-language-1b - [legacy]-conversion-tutorials/convert-tensorflow-ncf - [legacy]-conversion-tutorials/convert-tensorflow-object-detection - [legacy]-conversion-tutorials/convert-tensorflow-retina-net - [legacy]-conversion-tutorials/convert-tensorflow-slim-library - [legacy]-conversion-tutorials/convert-tensorflow-wide-and-deep-family - [legacy]-conversion-tutorials/convert-tensorflow-xlnet - [legacy]-conversion-tutorials/convert-tensorflow-yolo - [legacy]-conversion-tutorials/convert-onnx-faster-r-cnn - [legacy]-conversion-tutorials/convert-onnx-gpt-2 - [legacy]-conversion-tutorials/convert-onnx-mask-r-cnn - [legacy]-conversion-tutorials/convert-pytorch-bert-ner - [legacy]-conversion-tutorials/convert-pytorch-cascade-rcnn-r-101 - [legacy]-conversion-tutorials/convert-pytorch-f3-net - [legacy]-conversion-tutorials/convert-pytorch-quartz-net - [legacy]-conversion-tutorials/convert-pytorch-rcan - [legacy]-conversion-tutorials/convert-pytorch-rnn-t - [legacy]-conversion-tutorials/convert-pytorch-yolact - - -.. meta:: - :description: Get to know conversion methods for specific TensorFlow, ONNX, and PyTorch models. - - -.. danger:: - - The code described in the tutorials has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../learn-openvino/interactive-tutorials-python>`. - -This section provides a set of tutorials that demonstrate conversion methods for specific -TensorFlow, ONNX, and PyTorch models. Note that these instructions do not cover all use -cases and may not reflect your particular needs. -Before studying the tutorials, try to convert the model out-of-the-box by specifying only the -``--input_model`` parameter in the command line. - -.. note:: - - Apache MXNet, Caffe, and Kaldi are no longer directly supported by OpenVINO. - -You will find a collection of :doc:`Python tutorials <../../../../../learn-openvino/interactive-tutorials-python>` written for running on Jupyter notebooks -that provide an introduction to the OpenVINO™ toolkit and explain how to use the Python API and tools for -optimized deep learning inference. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-faster-r-cnn.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-faster-r-cnn.rst deleted file mode 100644 index 7880b261c80b81..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-faster-r-cnn.rst +++ /dev/null @@ -1,41 +0,0 @@ -Converting an ONNX Faster R-CNN Model -===================================== - - -.. meta:: - :description: Learn how to convert a Faster R-CNN model - from ONNX to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. 
It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -The instructions below are applicable **only** to the Faster R-CNN model converted to the ONNX file format from the `maskrcnn-benchmark model `__: - -1. Download the pretrained model file from `onnx/models `__ (commit-SHA: 8883e49e68de7b43e263d56b9ed156dfa1e03117). - -2. Generate the Intermediate Representation of the model, by changing your current working directory to the model conversion API installation directory, and running model conversion with the following parameters: - - .. code-block:: sh - - mo \ - --input_model FasterRCNN-10.onnx \ - --input_shape [1,3,800,800] \ - --input 0:2 \ - --mean_values [102.9801,115.9465,122.7717] \ - --transformations_config front/onnx/faster_rcnn.json - - - Be aware that the height and width specified with the ``input_shape`` command line parameter - could be different. For more information about supported input image dimensions and - required pre- and post-processing steps, refer to the - `Faster R-CNN article `__. - -3. Interpret the outputs of the generated IR: class indices, probabilities and box coordinates. 
Below are the outputs from the ``DetectionOutput`` layer: - - * class indices - * probabilities - * box coordinates - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-gpt-2.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-gpt-2.rst deleted file mode 100644 index 4c10c941c7fb47..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-gpt-2.rst +++ /dev/null @@ -1,34 +0,0 @@ -Converting an ONNX GPT-2 Model -============================== - - -.. meta:: - :description: Learn how to convert a pre-trained GPT-2 - model from ONNX to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -`Public pre-trained GPT-2 model `__ is a large -transformer-based language model with a simple objective: predict the next word, given all of the previous words within some text. - -Downloading the Pre-Trained Base GPT-2 Model -############################################ - -To download the model, go to `this model `__, and press **Download**. - -To download the model and sample test data, go to `this model `__, and press **Download**. 
- -Converting an ONNX GPT-2 Model to IR -#################################### - -Generate the Intermediate Representation of the model GPT-2 by running model conversion with the following parameters: - -.. code-block:: sh - - mo --input_model gpt2-10.onnx --input_shape [X,Y,Z] --output_dir - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-mask-r-cnn.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-mask-r-cnn.rst deleted file mode 100644 index 6158f5bdcb59ed..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-onnx-mask-r-cnn.rst +++ /dev/null @@ -1,41 +0,0 @@ -Converting an ONNX Mask R-CNN Model -=================================== - - -.. meta:: - :description: Learn how to convert a pre-trained Mask - R-CNN model from ONNX to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -The instructions below are applicable **only** to the Mask R-CNN model converted to the ONNX file format from the `maskrcnn-benchmark model `__. - -1. Download the pretrained model file from `onnx/models `__ (commit-SHA: 8883e49e68de7b43e263d56b9ed156dfa1e03117). - -2. 
Generate the Intermediate Representation of the model by changing your current working directory to the model conversion API installation directory and running model conversion with the following parameters: - - .. code-block:: sh - - mo \ - --input_model mask_rcnn_R_50_FPN_1x.onnx \ - --input "0:2" \ - --input_shape [1,3,800,800] \ - --mean_values [102.9801,115.9465,122.7717] \ - --transformations_config front/onnx/mask_rcnn.json - - - Be aware that the height and width specified with the ``input_shape`` command line parameter could be different. For more information about supported input image dimensions and required pre- and post-processing steps, refer to the `documentation `__. - -3. Interpret the outputs of the generated IR file: masks, class indices, probabilities and box coordinates: - - * masks - * class indices - * probabilities - * box coordinates - -The first one is a layer with the name ``6849/sink_port_0``, and rest are outputs from the ``DetectionOutput`` layer. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-bert-ner.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-bert-ner.rst deleted file mode 100644 index e89d21f28c66c4..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-bert-ner.rst +++ /dev/null @@ -1,76 +0,0 @@ -Converting a PyTorch BERT-NER Model -=================================== - - -.. meta:: - :description: Learn how to convert a BERT-NER model - from PyTorch to the OpenVINO Intermediate Representation. - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -The goal of this article is to present a step-by-step guide on how to convert PyTorch BERT-NER model to OpenVINO IR. First, you need to download the model and convert it to ONNX. - - -Downloading and Converting the Model to ONNX -############################################ - -To download a pretrained model or train the model yourself, refer -to the `instructions `__ in the -BERT-NER model repository. The model with configuration files is stored in the ``out_base`` directory. - -To convert the model to ONNX format, create and run the following script in the root -directory of the model repository. If you download the pretrained model, you need -to download `bert.py `__ to run the script. -The instructions were tested with the commit-SHA: ``e5be564156f194f1becb0d82aeaf6e762d9eb9ed``. - -.. 
code-block:: py - :force: - - import torch - - from bert import Ner - - ner = Ner("out_base") - - input_ids, input_mask, segment_ids, valid_positions = ner.preprocess('Steve went to Paris') - input_ids = torch.tensor([input_ids], dtype=torch.long, device=ner.device) - input_mask = torch.tensor([input_mask], dtype=torch.long, device=ner.device) - segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=ner.device) - valid_ids = torch.tensor([valid_positions], dtype=torch.long, device=ner.device) - - ner_model, tknizr, model_config = ner.load_model("out_base") - - with torch.no_grad(): - logits = ner_model(input_ids, segment_ids, input_mask, valid_ids) - torch.onnx.export(ner_model, - (input_ids, segment_ids, input_mask, valid_ids), - "bert-ner.onnx", - input_names=['input_ids', 'segment_ids', 'input_mask', 'valid_ids'], - output_names=['output'], - dynamic_axes={ - "input_ids": {0: "batch_size"}, - "segment_ids": {0: "batch_size"}, - "input_mask": {0: "batch_size"}, - "valid_ids": {0: "batch_size"}, - "output": {0: "output"} - }, - opset_version=11, - ) - - -The script generates ONNX model file ``bert-ner.onnx``. - -Converting an ONNX BERT-NER model to IR -####################################### - -.. code-block:: sh - - mo --input_model bert-ner.onnx --input "input_mask[1,128],segment_ids[1,128],input_ids[1,128]" - - -where ``1`` is ``batch_size`` and ``128`` is ``sequence_length``. 
- diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-cascade-rcnn-r-101.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-cascade-rcnn-r-101.rst deleted file mode 100644 index a61ca5e79f1c30..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-cascade-rcnn-r-101.rst +++ /dev/null @@ -1,51 +0,0 @@ -Converting a PyTorch Cascade RCNN R-101 Model -============================================= - - -.. meta:: - :description: Learn how to convert a Cascade RCNN R-101 - model from PyTorch to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -The goal of this article is to present a step-by-step guide on how to convert a PyTorch Cascade RCNN R-101 model to OpenVINO IR. First, you need to download the model and convert it to ONNX. - -Downloading and Converting Model to ONNX -######################################## - -* Clone the `repository `__ : - - .. code-block:: sh - - git clone https://github.com/open-mmlab/mmdetection - cd mmdetection - - - .. note:: - - To set up an environment, refer to the `instructions `__. - -* Download the pre-trained `model `__. The model is also available `here `__. 
- -* To convert the model to ONNX format, use this `script `__. - - .. code-block:: sh - - python3 tools/deployment/pytorch2onnx.py configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth --output-file cascade_rcnn_r101_fpn_1x_coco.onnx - - -The script generates ONNX model file ``cascade_rcnn_r101_fpn_1x_coco.onnx`` in the directory ``tools/deployment/``. If required, specify the model name or output directory, using ``--output-file /.onnx``. - -Converting an ONNX Cascade RCNN R-101 Model to OpenVINO IR -########################################################## - -.. code-block:: sh - - mo --input_model cascade_rcnn_r101_fpn_1x_coco.onnx --mean_values [123.675,116.28,103.53] --scale_values [58.395,57.12,57.375] - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-f3-net.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-f3-net.rst deleted file mode 100644 index d1391cfb1519ba..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-f3-net.rst +++ /dev/null @@ -1,55 +0,0 @@ -Converting a PyTorch F3Net Model -================================ - - -.. meta:: - :description: Learn how to convert a F3Net model - from PyTorch to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -`F3Net `__ : Fusion, Feedback and Focus for Salient Object Detection - -Cloning the F3Net Repository -############################ - -To clone the repository, run the following command: - -.. code-block:: sh - - git clone http://github.com/weijun88/F3Net.git - - -Downloading and Converting the Model to ONNX -############################################ - -To download the pretrained model or train the model yourself, refer to the -`instructions `__ in the F3Net model repository. First, convert the model to ONNX format. Create and run the following Python script in the ``src`` directory of the model repository: - -.. code-block:: py - :force: - - import torch - from dataset import Config - from net import F3Net - - cfg = Config(mode='test', snapshot=) - net = F3Net(cfg) - image = torch.zeros([1, 3, 352, 352]) - torch.onnx.export(net, image, 'f3net.onnx', export_params=True, do_constant_folding=True, opset_version=11) - - -The script generates the ONNX model file ``f3net.onnx``. The model conversion was tested with the commit-SHA: ``eecace3adf1e8946b571a4f4397681252f9dc1b8``. - -Converting an ONNX F3Net Model to IR -#################################### - -.. 
code-block:: sh - - mo --input_model /f3net.onnx - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-quartz-net.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-quartz-net.rst deleted file mode 100644 index f1ee885dae0b26..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-quartz-net.rst +++ /dev/null @@ -1,61 +0,0 @@ -Converting a PyTorch QuartzNet Model -==================================== - - -.. meta:: - :description: Learn how to convert a QuartzNet model - from PyTorch to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -`NeMo project `__ provides the QuartzNet model. - -Downloading the Pre-trained QuartzNet Model -########################################### - -To download the pre-trained model, refer to the `NeMo Speech Models Catalog `__. -Here are the instructions on how to obtain QuartzNet in ONNX format. - -1. Install the NeMo toolkit, using the `instructions `__. - -2. Run the following code: - - .. 
code-block:: py - :force: - - import nemo - import nemo.collections.asr as nemo_asr - - quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En") - # Export QuartzNet model to ONNX format - quartznet.decoder.export('decoder_qn.onnx') - quartznet.encoder.export('encoder_qn.onnx') - quartznet.export('qn.onnx') - - - This code produces 3 ONNX model files: ``encoder_qn.onnx``, ``decoder_qn.onnx``, ``qn.onnx``. - They are ``decoder``, ``encoder``, and a combined ``decoder(encoder(x))`` models, respectively. - -Converting an ONNX QuartzNet model to IR -######################################## - -If using a combined model: - -.. code-block:: sh - - mo --input_model /qn.onnx --input_shape [B,64,X] - -If using separate models: - -.. code-block:: sh - - mo --input_model /encoder_qn.onnx --input_shape [B,64,X] - mo --input_model /decoder_qn.onnx --input_shape [B,1024,Y] - - -Where shape is determined by the audio file Mel-Spectrogram length: ``B`` - batch dimension, ``X`` - dimension based on the input length, ``Y`` - determined by encoder output, usually ``X / 2``. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rcan.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rcan.rst deleted file mode 100644 index 7e9fb7b5717cbd..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rcan.rst +++ /dev/null @@ -1,49 +0,0 @@ -Converting a PyTorch RCAN Model -=============================== - - -.. meta:: - :description: Learn how to convert a RCAN model - from PyTorch to the OpenVINO Intermediate Representation. - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -`RCAN `__ : Image Super-Resolution Using Very Deep Residual Channel Attention Networks - -Downloading and Converting the Model to ONNX -############################################ - -To download the pre-trained model or train the model yourself, refer to the `instruction `__ in the RCAN model repository. First, convert the model to ONNX format. Create and run the script with the following content in the root -directory of the model repository: - -.. code-block:: py - :force: - - from argparse import Namespace - - import torch - - from RCAN_TestCode.code.model.rcan import RCAN - - config = Namespace(n_feats=64, n_resblocks=4, n_resgroups=2, reduction=16, scale=[2], data_train='DIV2K', res_scale=1, - n_colors=3, rgb_range=255) - net = RCAN(config) - net.eval() - dummy_input = torch.randn(1, 3, 360, 640) - torch.onnx.export(net, dummy_input, 'RCAN.onnx') - - -The script generates the ONNX model file ``RCAN.onnx``. More information about model parameters (``n_resblocks``, ``n_resgroups``, and others) and their different values can be found in the model repository. The model conversion was tested with the commit-SHA: ``3339ebc59519c3bb2b5719b87dd36515ec7f3ba7``. - -Converting an ONNX RCAN Model to IR -################################### - -.. 
code-block:: sh - - mo --input_model RCAN.onnx - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rnn-t.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rnn-t.rst deleted file mode 100644 index ad646568aed598..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-rnn-t.rst +++ /dev/null @@ -1,137 +0,0 @@ -Converting a PyTorch RNN-T Model -================================ - - -.. meta:: - :description: Learn how to convert a RNN-T model - from PyTorch to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This guide covers conversion of RNN-T model from `MLCommons `__ repository. Follow -the instructions below to export a PyTorch model into ONNX, before converting it to IR: - -**Step 1**. Clone RNN-T PyTorch implementation from MLCommons repository (revision r1.0). Make a shallow clone to pull -only RNN-T model without full repository. If you already have a full repository, skip this and go to **Step 2**: - -.. code-block:: sh - - git clone -b r1.0 -n https://github.com/mlcommons/inference rnnt_for_openvino --depth 1 - cd rnnt_for_openvino - git checkout HEAD speech_recognition/rnnt - - -**Step 2**. 
If you already have a full clone of MLCommons inference repository, create a folder for -pretrained PyTorch model, where conversion into IR will take place. You will also need to specify the path to -your full clone at **Step 5**. Skip this step if you have a shallow clone. - -.. code-block:: sh - - mkdir rnnt_for_openvino - cd rnnt_for_openvino - - -**Step 3**. Download pre-trained weights for PyTorch implementation from `here `__. -For UNIX-like systems, you can use ``wget``: - -.. code-block:: sh - - wget https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt - - -The link was taken from ``setup.sh`` in the ``speech_recognition/rnnt`` subfolder. You will get exactly the same weights as -if you were following the `guide `__. - -**Step 4**. Install required Python packages: - -.. code-block:: sh - - pip3 install torch toml - - -**Step 5**. Export RNN-T model into ONNX, using the script below. Copy the code below into a file named -``export_rnnt_to_onnx.py`` and run it in the current directory ``rnnt_for_openvino``: - -.. note:: - - If you already have a full clone of MLCommons inference repository, you need - to specify the ``mlcommons_inference_path`` variable. - -.. 
code-block:: py - :force: - - import toml - import torch - import sys - - - def load_and_migrate_checkpoint(ckpt_path): - checkpoint = torch.load(ckpt_path, map_location="cpu") - migrated_state_dict = {} - for key, value in checkpoint['state_dict'].items(): - key = key.replace("joint_net", "joint.net") - migrated_state_dict[key] = value - del migrated_state_dict["audio_preprocessor.featurizer.fb"] - del migrated_state_dict["audio_preprocessor.featurizer.window"] - return migrated_state_dict - - - mlcommons_inference_path = './' # specify relative path for MLCommons inference - checkpoint_path = 'DistributedDataParallel_1576581068.9962234-epoch-100.pt' - config_toml = 'speech_recognition/rnnt/pytorch/configs/rnnt.toml' - config = toml.load(config_toml) - rnnt_vocab = config['labels']['labels'] - sys.path.insert(0, mlcommons_inference_path + 'speech_recognition/rnnt/pytorch') - - from model_separable_rnnt import RNNT - - model = RNNT(config['rnnt'], len(rnnt_vocab) + 1, feature_config=config['input_eval']) - model.load_state_dict(load_and_migrate_checkpoint(checkpoint_path)) - - seq_length, batch_size, feature_length = 157, 1, 240 - inp = torch.randn([seq_length, batch_size, feature_length]) - feature_length = torch.LongTensor([seq_length]) - x_padded, x_lens = model.encoder(inp, feature_length) - torch.onnx.export(model.encoder, (inp, feature_length), "rnnt_encoder.onnx", opset_version=12, - input_names=['input', 'feature_length'], output_names=['x_padded', 'x_lens'], - dynamic_axes={'input': {0: 'seq_len', 1: 'batch'}}) - - symbol = torch.LongTensor([[20]]) - hidden = torch.randn([2, batch_size, 320]), torch.randn([2, batch_size, 320]) - g, hidden = model.prediction.forward(symbol, hidden) - torch.onnx.export(model.prediction, (symbol, hidden), "rnnt_prediction.onnx", opset_version=12, - input_names=['symbol', 'hidden_in_1', 'hidden_in_2'], - output_names=['g', 'hidden_out_1', 'hidden_out_2'], - dynamic_axes={'symbol': {0: 'batch'}, 'hidden_in_1': {1: 'batch'}, 
'hidden_in_2': {1: 'batch'}}) - - f = torch.randn([batch_size, 1, 1024]) - model.joint.forward(f, g) - torch.onnx.export(model.joint, (f, g), "rnnt_joint.onnx", opset_version=12, - input_names=['0', '1'], output_names=['result'], dynamic_axes={'0': {0: 'batch'}, '1': {0: 'batch'}}) - - -.. code-block:: sh - - python3 export_rnnt_to_onnx.py - - -After completing this step, the files ``rnnt_encoder.onnx``, ``rnnt_prediction.onnx``, and ``rnnt_joint.onnx`` will be saved in the current directory. - -**Step 6**. Run the conversion commands: - -.. code-block:: sh - - mo --input_model rnnt_encoder.onnx --input "input[157,1,240],feature_length->157" - mo --input_model rnnt_prediction.onnx --input "symbol[1,1],hidden_in_1[2,1,320],hidden_in_2[2,1,320]" - mo --input_model rnnt_joint.onnx --input "0[1,1,1024],1[1,1,320]" - - -.. note:: - - The hardcoded value for sequence length = 157 was taken from the MLCommons, but conversion to IR preserves network :doc:`reshapeability <../../../../../../openvino-workflow/running-inference/changing-input-shape>`. Therefore, input shapes can be changed manually to any value during either conversion or inference. - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-yolact.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-yolact.rst deleted file mode 100644 index 0eacbd6c5b0bf9..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-pytorch-yolact.rst +++ /dev/null @@ -1,222 +0,0 @@ -Converting a PyTorch YOLACT Model -================================= - - -.. 
meta:: - :description: Learn how to convert a YOLACT model - from PyTorch to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -You Only Look At CoefficienTs (YOLACT) is a simple, fully convolutional model for real-time instance segmentation. -The PyTorch implementation is publicly available in `this GitHub repository `__. -The YOLACT++ model is not supported, because it uses deformable convolutional layers that cannot be represented in ONNX format. - -.. _patch-file-yolact: - -Creating a Patch File -##################### - -Before converting the model, create a patch file for the repository. -The patch modifies the framework code by adding a special command-line argument to the framework options. The argument enables inference graph dumping: - -1. Go to a writable directory and create a ``YOLACT_onnx_export.patch`` file. -2. Copy the following diff code to the file: - - .. 
code-block:: console - - From 76deb67d4f09f29feda1a633358caa18335d9e9f Mon Sep 17 00:00:00 2001 - From: "OpenVINO" - Date: Fri, 12 Mar 2021 00:27:35 +0300 - Subject: [PATCH] Add export to ONNX - - --- - eval.py | 5 ++++- - utils/augmentations.py | 7 +++++-- - yolact.py | 29 +++++++++++++++++++---------- - 3 files changed, 28 insertions(+), 13 deletions(-) - - diff --git a/eval.py b/eval.py - index 547bc0a..bde0680 100644 - --- a/eval.py - +++ b/eval.py - @@ -593,9 +593,12 @@ def badhash(x): - return x - - def evalimage(net:Yolact, path:str, save_path:str=None): - - frame = torch.from_numpy(cv2.imread(path)).cuda().float() - + frame = torch.from_numpy(cv2.imread(path)).float() - + if torch.cuda.is_available(): - + frame = frame.cuda() - batch = FastBaseTransform()(frame.unsqueeze(0)) - preds = net(batch) - + torch.onnx.export(net, batch, "yolact.onnx", opset_version=11) - - img_numpy = prep_display(preds, frame, None, None, undo_transform=False) - - diff --git a/utils/augmentations.py b/utils/augmentations.py - index cc7a73a..2420603 100644 - --- a/utils/augmentations.py - +++ b/utils/augmentations.py - @@ -623,8 +623,11 @@ class FastBaseTransform(torch.nn.Module): - def __init__(self): - super().__init__() - - - self.mean = torch.Tensor(MEANS).float().cuda()[None, :, None, None] - - self.std = torch.Tensor( STD ).float().cuda()[None, :, None, None] - + self.mean = torch.Tensor(MEANS).float()[None, :, None, None] - + self.std = torch.Tensor( STD ).float()[None, :, None, None] - + if torch.cuda.is_available(): - + self.mean.cuda() - + self.std.cuda() - self.transform = cfg.backbone.transform - - def forward(self, img): - diff --git a/yolact.py b/yolact.py - index d83703b..f8c787c 100644 - --- a/yolact.py - +++ b/yolact.py - @@ -17,19 +17,22 @@ import torch.backends.cudnn as cudnn - from utils import timer - from utils.functions import MovingAverage, make_net - - -# This is required for Pytorch 1.0.1 on Windows to initialize Cuda on some driver versions. 
- -# See the bug report here: https://github.com/pytorch/pytorch/issues/17108 - -torch.cuda.current_device() - - - -# As of March 10, 2019, Pytorch DataParallel still doesn't support JIT Script Modules - -use_jit = torch.cuda.device_count() <= 1 - -if not use_jit: - - print('Multiple GPUs detected! Turning off JIT.') - +use_jit = False - - ScriptModuleWrapper = torch.jit.ScriptModule if use_jit else nn.Module - script_method_wrapper = torch.jit.script_method if use_jit else lambda fn, _rcn=None: fn - - - +def decode(loc, priors): - + variances = [0.1, 0.2] - + boxes = torch.cat((priors[:, :2] + loc[:, :, :2] * variances[0] * priors[:, 2:], priors[:, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2) - + - + boxes_result1 = boxes[:, :, :2] - boxes[:, :, 2:] / 2 - + boxes_result2 = boxes[:, :, 2:] + boxes_result1 - + boxes_result = torch.cat((boxes_result1, boxes_result2), 2) - + - + return boxes_result - + - - class Concat(nn.Module): - def __init__(self, nets, extra_params): - @@ -476,7 +479,10 @@ class Yolact(nn.Module): - - def load_weights(self, path): - """ Loads weights from a compressed save file. """ - - state_dict = torch.load(path) - + if torch.cuda.is_available(): - + state_dict = torch.load(path) - + else: - + state_dict = torch.load(path, map_location=torch.device('cpu')) - - # For backward compatibility, remove these (the new variable is called layers) - for key in list(state_dict.keys()): - @@ -673,8 +679,11 @@ class Yolact(nn.Module): - else: - pred_outs['conf'] = F.softmax(pred_outs['conf'], -1) - - - return self.detect(pred_outs, self) - + pred_outs['boxes'] = decode(pred_outs['loc'], pred_outs['priors']) # decode output boxes - - + pred_outs.pop('priors') # remove unused in postprocessing layers - + pred_outs.pop('loc') # remove unused in postprocessing layers - + return pred_outs - - - - -- - - -3. Save and close the file. - -Converting a YOLACT Model to the OpenVINO IR format -################################################### - -**Step 1**. 
Clone the GitHub repository and check out the commit: - -1. Clone the YOLACT repository: - - .. code-block:: sh - - git clone https://github.com/dbolya/yolact - - -2. Check out the necessary commit: - - .. code-block:: sh - - git checkout 57b8f2d95e62e2e649b382f516ab41f949b57239 - - -3. Set up the environment as described in ``README.md``. - -**Step 2**. Download a pre-trained model from the list attached in the ``Evaluation`` section of ``README.md`` document, for example ``yolact_base_54_800000.pth``. - -**Step 3**. Export the model to ONNX format. - -1. Apply the `YOLACT_onnx_export.patch` patch to the repository. Refer to the :ref:`Create a Patch File ` instructions if you do not have it: - - .. code-block:: sh - - git apply /path/to/patch/YOLACT_onnx_export.patch - - -2. Evaluate the YOLACT model to export it to ONNX format: - - .. code-block:: sh - - python3 eval.py \ - --trained_model=/path/to/yolact_base_54_800000.pth \ - --score_threshold=0.3 \ - --top_k=10 \ - --image=/path/to/image.jpg \ - --cuda=False - - -3. The script may fail, but you should get ``yolact.onnx`` file. - -**Step 4**. Convert the model to the IR: - -.. code-block:: sh - - mo --input_model /path/to/yolact.onnx - - -**Step 5**. Embed input preprocessing into the IR: - -To get performance gain by offloading to the OpenVINO application of mean/scale values and RGB->BGR conversion, use the following model conversion API parameters: - -* If the backbone of the model is Resnet50-FPN or Resnet101-FPN, use the following MO command line: - - .. code-block:: sh - - mo \ - --input_model /path/to/yolact.onnx \ - --reverse_input_channels \ - --mean_values "[123.68, 116.78, 103.94]" \ - --scale_values "[58.40, 57.12, 57.38]" - - -* If the backbone of the model is Darknet53-FPN, use the following MO command line: - - .. 
code-block:: sh - - mo \ - --input_model /path/to/yolact.onnx \ - --reverse_input_channels \ - --scale 255 - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-attention-ocr.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-attention-ocr.rst deleted file mode 100644 index dd419456ccbcd3..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-attention-ocr.rst +++ /dev/null @@ -1,60 +0,0 @@ -Converting a TensorFlow Attention OCR Model -=========================================== - - -.. meta:: - :description: Learn how to convert the Attention OCR - model from the TensorFlow Attention OCR repository to the - OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This tutorial explains how to convert the Attention OCR (AOCR) model from the `TensorFlow Attention OCR repository `__ to the Intermediate Representation (IR). - -Extracting a Model from ``aocr`` Library -######################################## - -To get an AOCR model, download ``aocr`` Python library: - -.. 
code-block:: sh - - pip install git+https://github.com/emedvedev/attention-ocr.git@master#egg=aocr - -This library contains a pretrained model and allows training and running AOCR, using the command line. After installation of `aocr`, extract the model: - -.. code-block:: sh - - aocr export --format=frozengraph model/path/ - -Once extracted, the model can be found in ``model/path/`` folder. - -Converting the TensorFlow AOCR Model to IR -########################################## - -The original AOCR model includes the preprocessing data, which contains: - -* Decoding input data to binary format where input data is an image represented as a string. -* Resizing binary image to working resolution. - -The resized image is sent to the convolution neural network (CNN). Because model conversion API does not support image decoding, the preprocessing part of the model should be cut off, using the ``input`` command-line parameter. - -.. code-block:: sh - - mo \ - --input_model=model/path/frozen_graph.pb \ - --input="map/TensorArrayStack/TensorArrayGatherV3:0[1,32,86,1]" \ - --output "transpose_1,transpose_2" \ - --output_dir path/to/ir/ - - -Where: - -* ``map/TensorArrayStack/TensorArrayGatherV3:0[1 32 86 1]`` - name of node producing tensor after preprocessing. -* ``transpose_1`` - name of the node producing tensor with predicted characters. -* ``transpose_2`` - name of the node producing tensor with predicted characters probabilities. 
- diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-bert.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-bert.rst deleted file mode 100644 index 197b6e13c4e27a..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-bert.rst +++ /dev/null @@ -1,170 +0,0 @@ -Converting a TensorFlow BERT Model -================================== - - -.. meta:: - :description: Learn how to convert a BERT model - from TensorFlow to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -Pretrained models for BERT (Bidirectional Encoder Representations from Transformers) are -`publicly available `__. - -.. _supported_models: - -Supported Models -################ - -The following models from the pretrained `BERT model list `__ are currently supported: - -* ``BERT-Base, Cased`` -* ``BERT-Base, Uncased`` -* ``BERT-Base, Multilingual Cased`` -* ``BERT-Base, Multilingual Uncased`` -* ``BERT-Base, Chinese`` -* ``BERT-Large, Cased`` -* ``BERT-Large, Uncased`` - -Downloading the Pretrained BERT Model -##################################### - -Download and unzip an archive with the `BERT-Base, Multilingual Uncased Model `__. 
- -After the archive is unzipped, the directory ``uncased_L-12_H-768_A-12`` is created and contains the following files: - -* ``bert_config.json`` -* ``bert_model.ckpt.data-00000-of-00001`` -* ``bert_model.ckpt.index`` -* ``bert_model.ckpt.meta`` -* ``vocab.txt`` - -Pretrained model meta-graph files are ``bert_model.ckpt.*``. - -Converting a TensorFlow BERT Model to IR -######################################### - -To generate the BERT Intermediate Representation (IR) of the model, run model conversion with the following parameters: - -.. code-block:: sh - - mo \ - --input_meta_graph uncased_L-12_H-768_A-12/bert_model.ckpt.meta \ - --output bert/pooler/dense/Tanh \ - --input Placeholder{i32},Placeholder_1{i32},Placeholder_2{i32} - - -Pretrained models are not suitable for batch reshaping out-of-the-box because of multiple hardcoded shapes in the model. - -Converting a Reshapable TensorFlow BERT Model to OpenVINO IR -============================================================= - -Follow these steps to make a pretrained TensorFlow BERT model reshapable over batch dimension: - -1. Download a pretrained BERT model you want to use from the `Supported Models list <#supported_models>`__. - -2. Clone google-research/bert git repository: - - .. code-block:: sh - - https://github.com/google-research/bert.git - -3. Go to the root directory of the cloned repository: - - .. code-block:: sh - - cd bert - -4. (Optional) Checkout to the commit that the conversion was tested on: - - .. code-block:: sh - - git checkout eedf5716c - -5. Download script to load GLUE data: - - * For UNIX-like systems, run the following command: - - .. code-block:: sh - - wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py - - * For Windows systems: - - Download the `Python script `__ to the current working directory. - -6. Download GLUE data by running: - - .. 
code-block:: sh - - python3 download_glue_data.py --tasks MRPC - -7. Open the file ``modeling.py`` in the text editor and delete lines 923-924. They should look like this: - - .. code-block:: py - :force: - - if not non_static_indexes: - return shape - -8. Open the file ``run_classifier.py`` and insert the following code after the line 645: - - .. code-block:: py - :force: - - import os, sys - import tensorflow as tf - from tensorflow.python.framework import graph_io - with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess: - (assignment_map, initialized_variable_names) = \ - modeling.get_assignment_map_from_checkpoint(tf.compat.v1.trainable_variables(), init_checkpoint) - tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map) - sess.run(tf.compat.v1.global_variables_initializer()) - frozen = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["bert/pooler/dense/Tanh"]) - graph_io.write_graph(frozen, './', 'inference_graph.pb', as_text=False) - print('BERT frozen model path {}'.format(os.path.join(os.path.dirname(__file__), 'inference_graph.pb'))) - sys.exit(0) - - Lines before the inserted code should look like this: - - .. code-block:: py - :force: - - (total_loss, per_example_loss, logits, probabilities) = create_model( - bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, - num_labels, use_one_hot_embeddings) - - -9. Set environment variables ``BERT_BASE_DIR``, ``BERT_REPO_DIR`` and run the script ``run_classifier.py`` to create ``inference_graph.pb`` file in the root of the cloned BERT repository. - - .. 
code-block:: sh - - export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12 - export BERT_REPO_DIR=/current/working/directory - - python3 run_classifier.py \ - --task_name=MRPC \ - --do_eval=true \ - --data_dir=$BERT_REPO_DIR/glue_data/MRPC \ - --vocab_file=$BERT_BASE_DIR/vocab.txt \ - --bert_config_file=$BERT_BASE_DIR/bert_config.json \ - --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \ - --output_dir=./ - - Run model conversion with the following command line parameters to generate reshape-able BERT Intermediate Representation (IR): - - .. code-block:: sh - - mo \ - --input_model inference_graph.pb \ - --input "IteratorGetNext:0{i32}[1,128],IteratorGetNext:1{i32}[1,128],IteratorGetNext:4{i32}[1,128]" - -For other applicable parameters, refer to the :doc:`Convert Model from TensorFlow <../[legacy]-convert-tensorflow>` guide. - -For more information about reshape abilities, refer to the :doc:`Using Shape Inference <../../../../../../openvino-workflow/running-inference/changing-input-shape>` guide. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-crnn.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-crnn.rst deleted file mode 100644 index a94d72b4508f3c..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-crnn.rst +++ /dev/null @@ -1,86 +0,0 @@ -Converting a TensorFlow CRNN Model -================================== - - -.. meta:: - :description: Learn how to convert a CRNN model - from TensorFlow to the OpenVINO Intermediate Representation. - - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This tutorial explains how to convert a CRNN model to OpenVINO™ Intermediate Representation (IR). - -There are several public versions of TensorFlow CRNN model implementation available on GitHub. This tutorial explains how to convert the model from -the `CRNN Tensorflow `__ repository to IR, and is validated with Python 3.7, TensorFlow 1.15.0, and protobuf 3.19.0. -If you have another implementation of CRNN model, it can be converted to OpenVINO IR in a similar way. You need to get inference graph and run model conversion of it. - -**To convert the model to IR:** - -**Step 1.** Clone this GitHub repository and check out the commit: - -1. Clone the repository: - - .. code-block:: sh - - git clone https://github.com/MaybeShewill-CV/CRNN_Tensorflow.git - -2. Go to the ``CRNN_Tensorflow`` directory of the cloned repository: - - .. code-block:: sh - - cd path/to/CRNN_Tensorflow - -3. Check out the necessary commit: - - .. code-block:: sh - - git checkout 64f1f1867bffaacfeacc7a80eebf5834a5726122 - - -**Step 2.** Train the model using the framework or the pretrained checkpoint provided in this repository. - - -**Step 3.** Create an inference graph: - -1. Add the ``CRNN_Tensorflow`` folder to ``PYTHONPATH``. - - * For Linux: - - .. code-block:: sh - - export PYTHONPATH="${PYTHONPATH}:/path/to/CRNN_Tensorflow/" - - - * For Windows, add ``/path/to/CRNN_Tensorflow/`` to the ``PYTHONPATH`` environment variable in settings. - -2. Edit the ``tools/demo_shadownet.py`` script. 
After ``saver.restore(sess=sess, save_path=weights_path)`` line, add the following code: - - .. code-block:: py - :force: - - from tensorflow.python.framework import graph_io - frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['shadow/LSTMLayers/transpose_time_major']) - graph_io.write_graph(frozen, '.', 'frozen_graph.pb', as_text=False) - -3. Run the demo with the following command: - - .. code-block:: sh - - python tools/demo_shadownet.py --image_path data/test_images/test_01.jpg --weights_path model/shadownet/shadownet_2017-10-17-11-47-46.ckpt-199999 - - - If you want to use your checkpoint, replace the path in the ``--weights_path`` parameter with a path to your checkpoint. - -4. In the ``CRNN_Tensorflow`` directory, you will find the inference CRNN graph ``frozen_graph.pb``. You can use this graph with OpenVINO to convert the model to IR and then run inference. - -**Step 4.** Convert the model to IR: - -.. code-block:: sh - - mo --input_model path/to/your/CRNN_Tensorflow/frozen_graph.pb - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-deep-speech.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-deep-speech.rst deleted file mode 100644 index e572b26324faf3..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-deep-speech.rst +++ /dev/null @@ -1,108 +0,0 @@ -Converting a TensorFlow DeepSpeech Model -======================================== - - -.. meta:: - :description: Learn how to convert a DeepSpeech model - from TensorFlow to the OpenVINO Intermediate Representation. - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -`DeepSpeech project `__ provides an engine to train speech-to-text models. - -Downloading the Pretrained DeepSpeech Model -########################################### - -Create a directory where model and metagraph with pretrained weights will be stored: - -.. code-block:: sh - - mkdir deepspeech - cd deepspeech - -`Pre-trained English speech-to-text model `__ is publicly available. -To download the model, follow the instruction below: - -* For UNIX-like systems, run the following command: - - .. code-block:: sh - - wget -O - https://github.com/mozilla/DeepSpeech/archive/v0.8.2.tar.gz | tar xvfz - - wget -O - https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-checkpoint.tar.gz | tar xvfz - - -* For Windows systems: - - 1. Download `the archive with the model `__. - 2. Download the `TensorFlow MetaGraph with pre-trained weights `__. - 3. Unpack it with a file archiver application. - -Freezing the Model into a "\*.pb File" -###################################### - -After unpacking the archives above, you have to freeze the model. This requires -TensorFlow version 1, which is not available under Python 3.8, so you need Python 3.7 or lower. -Before freezing, deploy a virtual environment and install the required packages: - -.. code-block:: sh - - virtualenv --python=python3.7 venv-deep-speech - source venv-deep-speech/bin/activate - cd DeepSpeech-0.8.2 - pip3 install -e . - -Freeze the model with the following command: - -.. 
code-block:: sh - - python3 DeepSpeech.py --checkpoint_dir ../deepspeech-0.8.2-checkpoint --export_dir ../ - -After that, you will get the pretrained frozen model file ``output_graph.pb`` in the directory ``deepspeech`` created at -the beginning. The model contains the preprocessing and main parts. The first preprocessing part performs conversion of input -spectrogram into a form useful for speech recognition (mel). This part of the model is not convertible into -the IR because it contains unsupported operations ``AudioSpectrogram`` and ``Mfcc``. - -The main and most computationally expensive part of the model converts the preprocessed audio into text. -There are two specificities with the supported part of the model. - -The first is that the model contains an input with sequence length. So the model can be converted with -a fixed input length shape, thus the model is not reshapable. -Refer to the :doc:`Using Shape Inference <../../../../../../openvino-workflow/running-inference/changing-input-shape>` guide. - -The second is that the frozen model still has two variables: ``previous_state_c`` and ``previous_state_h``, figure -with the frozen \*.pb model is below. It means that the model keeps training these variables at each inference. - -.. image:: ../../../../../../assets/images/DeepSpeech-0.8.2.png - -At the first inference, the variables are initialized with zero tensors. After execution, the results of the ``BlockLSTM`` -are assigned to cell state and hidden state, which are these two variables. - -Converting the Main Part of DeepSpeech Model into OpenVINO IR -############################################################# - -Model conversion API assumes that the output model is for inference only. That is why you should cut ``previous_state_c`` and ``previous_state_h`` variables off and resolve keeping cell and hidden states on the application level. 
- -There are certain limitations for the model conversion: - -* Time length (``time_len``) and sequence length (``seq_len``) are equal. -* Original model cannot be reshaped, so you should keep original shapes. - -To generate the IR, run model conversion with the following parameters: - -.. code-block:: sh - - mo \ - --input_model output_graph.pb \ - --input "input_lengths->[16],input_node[1,16,19,26],previous_state_h[1,2048],previous_state_c[1,2048]" \ - --output "cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1,cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd,logits" - - -Where: - -* ``input_lengths->[16]`` Replaces the input node with name "input_lengths" with a constant tensor of shape [1] with a single integer value of 16. This means that the model now can consume input sequences of length 16 only. -* ``input_node[1 16 19 26],previous_state_h[1 2048],previous_state_c[1 2048]`` replaces the variables with a placeholder. -* ``output ".../GatherNd_1,.../GatherNd,logits"`` output node names. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-efficient-det.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-efficient-det.rst deleted file mode 100644 index c894765a5dc604..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-efficient-det.rst +++ /dev/null @@ -1,90 +0,0 @@ -Converting TensorFlow EfficientDet Models -========================================= - - -.. 
meta:: - :description: Learn how to convert an EfficientDet model - from TensorFlow to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This tutorial explains how to convert EfficientDet public object detection models to the Intermediate Representation (IR). - -.. _efficientdet-to-ir: - -Converting EfficientDet Model to the IR -####################################### - -There are several public versions of EfficientDet model implementation available on GitHub. This tutorial explains how to -convert models from the `repository `__ (commit 96e1fee) to the OpenVINO format. - -Download and extract the model checkpoint `efficientdet-d4.tar.gz `__ -referenced in the **"Pretrained EfficientDet Checkpoints"** section of the model repository: - -.. code-block:: sh - - wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco2/efficientdet-d4.tar.gz - tar zxvf efficientdet-d4.tar.gz - -Converting an EfficientDet TensorFlow Model to the IR -+++++++++++++++++++++++++++++++++++++++++++++++++++++ - -To generate the IR of the EfficientDet TensorFlow model, run: - -.. code-block:: sh - - mo \ - --input_meta_graph efficientdet-d4/model.meta \ - --input_shape [1,$IMAGE_SIZE,$IMAGE_SIZE,3] \ - --reverse_input_channels - - -Where ``$IMAGE_SIZE`` is the size that the input image of the original TensorFlow model will be resized to. Different -EfficientDet models were trained with different input image sizes. 
To determine the right one, refer to the ``efficientdet_model_param_dict`` -dictionary in the `hparams_config.py `__ file. -The attribute ``image_size`` specifies the shape to be defined for the model conversion. - -.. note:: - - The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the ``RGB<->BGR`` conversion specifying the command-line parameter: ``--reverse_input_channels``. Otherwise, inference results may be incorrect. For more information about the parameter, refer to the **When to Reverse Input Channels** section of the :doc:`Converting a Model to Intermediate Representation (IR) <../../[legacy]-setting-input-shapes>` guide. - -OpenVINO toolkit provides samples that can be used to infer EfficientDet model. -For more information, refer to the `Open Model Zoo Demos `__. - -.. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format have are now - published on `Hugging Face `__. - - -Interpreting Results of the TensorFlow Model and the IR -####################################################### - -The TensorFlow model produces as output a list of 7-element tuples: ``[image_id, y_min, x_min, y_max, x_max, confidence, class_id]``, where: - -* ``image_id`` -- image batch index. -* ``y_min`` -- absolute ``y`` coordinate of the lower left corner of the detected object. -* ``x_min`` -- absolute ``x`` coordinate of the lower left corner of the detected object. -* ``y_max`` -- absolute ``y`` coordinate of the upper right corner of the detected object. -* ``x_max`` -- absolute ``x`` coordinate of the upper right corner of the detected object. -* ``confidence`` -- the confidence of the detected object. -* ``class_id`` -- the id of the detected object class counted from 1. - -The output of the IR is a list of 7-element tuples: ``[image_id, class_id, confidence, x_min, y_min, x_max, y_max]``, where: - -* ``image_id`` -- image batch index. 
-* ``class_id`` -- the id of the detected object class counted from 0. -* ``confidence`` -- the confidence of the detected object. -* ``x_min`` -- normalized ``x`` coordinate of the lower left corner of the detected object. -* ``y_min`` -- normalized ``y`` coordinate of the lower left corner of the detected object. -* ``x_max`` -- normalized ``x`` coordinate of the upper right corner of the detected object. -* ``y_max`` -- normalized ``y`` coordinate of the upper right corner of the detected object. - -The first element with ``image_id = -1`` means end of data. - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-face-net.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-face-net.rst deleted file mode 100644 index a528718349f717..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-face-net.rst +++ /dev/null @@ -1,42 +0,0 @@ -Converting TensorFlow FaceNet Models -==================================== - - -.. meta:: - :description: Learn how to convert a FaceNet model - from TensorFlow to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Supported Model Formats <../../../../../../openvino-workflow/model-preparation>` article. 
- -`Public pre-trained FaceNet models `__ contain both training -and inference part of graph. Switch between this two states is manageable with placeholder value. -Intermediate Representation (IR) models are intended for inference, which means that train part is redundant. - -There are two inputs in this network: boolean ``phase_train`` which manages state of the graph (train/infer) and -``batch_size`` which is a part of batch joining pattern. - -.. image:: ../../../../../../assets/images/FaceNet.svg - -Converting a TensorFlow FaceNet Model to the IR -############################################### - -To generate a FaceNet OpenVINO model, feed a TensorFlow FaceNet model to model conversion API with the following parameters: - -.. code-block:: sh - - mo - --input_model path_to_model/model_name.pb \ - --freeze_placeholder_with_value "phase_train->False" - - -The batch joining pattern transforms to a placeholder with the model default shape if ``--input_shape`` or ``--batch``/``-b`` are not provided. Otherwise, the placeholder shape has custom parameters. 
- -* ``freeze_placeholder_with_value "phase_train->False"`` to switch graph to inference mode -* ``batch`*/*`-b`` is applicable to override original network batch -* ``input_shape`` is applicable with or without ``input`` -* other options are applicable - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-gnmt.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-gnmt.rst deleted file mode 100644 index b8d2c592ed931d..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-gnmt.rst +++ /dev/null @@ -1,315 +0,0 @@ -Converting a TensorFlow GNMT Model -================================== - - -.. meta:: - :description: Learn how to convert a GNMT model - from TensorFlow to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This tutorial explains how to convert Google Neural Machine Translation (GNMT) model to the Intermediate Representation (IR). - -There are several public versions of TensorFlow GNMT model implementation available on GitHub. This tutorial explains how to convert the GNMT model from the `TensorFlow Neural Machine Translation (NMT) repository `__ to the IR. 
- -Creating a Patch File -##################### - -Before converting the model, you need to create a patch file for the repository. The patch modifies the framework code by adding a special command-line argument to the framework options that enables inference graph dumping: - -1. Go to a writable directory and create a ``GNMT_inference.patch`` file. -2. Copy the following diff code to the file: - - .. code-block:: py - - diff --git a/nmt/inference.py b/nmt/inference.py - index 2cbef07..e185490 100644 - --- a/nmt/inference.py - +++ b/nmt/inference.py - @@ -17,9 +17,11 @@ - from __future__ import print_function - - import codecs - +import os - import time - - import tensorflow as tf - +from tensorflow.python.framework import graph_io - - from . import attention_model - from . import gnmt_model - @@ -105,6 +107,29 @@ def start_sess_and_load_model(infer_model, ckpt_path): - return sess, loaded_infer_model - - - +def inference_dump_graph(ckpt_path, path_to_dump, hparams, scope=None): - + model_creator = get_model_creator(hparams) - + infer_model = model_helper.create_infer_model(model_creator, hparams, scope) - + sess = tf.Session( - + graph=infer_model.graph, config=utils.get_config_proto()) - + with infer_model.graph.as_default(): - + loaded_infer_model = model_helper.load_model( - + infer_model.model, ckpt_path, sess, "infer") - + utils.print_out("Dumping inference graph to {}".format(path_to_dump)) - + loaded_infer_model.saver.save( - + sess, - + os.path.join(path_to_dump + 'inference_GNMT_graph') - + ) - + utils.print_out("Dumping done!") - + - + output_node_name = 'index_to_string_Lookup' - + utils.print_out("Freezing GNMT graph with output node {}...".format(output_node_name)) - + frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, - + [output_node_name]) - + graph_io.write_graph(frozen, '.', os.path.join(path_to_dump, 'frozen_GNMT_inference_graph.pb'), as_text=False) - + utils.print_out("Freezing done. 
Freezed model frozen_GNMT_inference_graph.pb saved to {}".format(path_to_dump)) - + - + - def inference(ckpt_path, - inference_input_file, - inference_output_file, - diff --git a/nmt/nmt.py b/nmt/nmt.py - index f5823d8..a733748 100644 - --- a/nmt/nmt.py - +++ b/nmt/nmt.py - @@ -310,6 +310,13 @@ def add_arguments(parser): - parser.add_argument("--num_intra_threads", type=int, default=0, - help="number of intra_op_parallelism_threads") - - + # Special argument for inference model dumping without inference - + parser.add_argument("--dump_inference_model", type="bool", nargs="?", - + const=True, default=False, - + help="Argument for dump inference graph for specified trained ckpt") - + - + parser.add_argument("--path_to_dump", type=str, default="", - + help="Path to dump inference graph.") - - def create_hparams(flags): - """Create training hparams.""" - @@ -396,6 +403,9 @@ def create_hparams(flags): - language_model=flags.language_model, - num_intra_threads=flags.num_intra_threads, - num_inter_threads=flags.num_inter_threads, - + - + dump_inference_model=flags.dump_inference_model, - + path_to_dump=flags.path_to_dump, - ) - - - @@ -613,7 +623,7 @@ def create_or_load_hparams( - return hparams - - - -def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""): - +def run_main(flags, default_hparams, train_fn, inference_fn, inference_dump, target_session=""): - """Run main.""" - # Job - jobid = flags.jobid - @@ -653,8 +663,26 @@ def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""): - out_dir, default_hparams, flags.hparams_path, - save_hparams=(jobid == 0)) - - - ## Train / Decode - - if flags.inference_input_file: - + # Dumping inference model - + if flags.dump_inference_model: - + # Inference indices - + hparams.inference_indices = None - + if flags.inference_list: - + (hparams.inference_indices) = ( - + [int(token) for token in flags.inference_list.split(",")]) - + - + # Ckpt - + ckpt = flags.ckpt - + if not ckpt: - + 
ckpt = tf.train.latest_checkpoint(out_dir) - + - + # Path to dump graph - + assert flags.path_to_dump != "", "Please, specify path_to_dump model." - + path_to_dump = flags.path_to_dump - + if not tf.gfile.Exists(path_to_dump): tf.gfile.MakeDirs(path_to_dump) - + - + inference_dump(ckpt, path_to_dump, hparams) - + elif flags.inference_input_file: - # Inference output directory - trans_file = flags.inference_output_file - assert trans_file - @@ -693,7 +721,8 @@ def main(unused_argv): - default_hparams = create_hparams(FLAGS) - train_fn = train.train - inference_fn = inference.inference - - run_main(FLAGS, default_hparams, train_fn, inference_fn) - + inference_dump = inference.inference_dump_graph - + run_main(FLAGS, default_hparams, train_fn, inference_fn, inference_dump) - - - if __name__ == "__main__": - - -3. Save and close the file. - -Converting a GNMT Model to the IR -################################# - -.. note:: Use TensorFlow version 1.13 or lower. - -**Step 1**. Clone the GitHub repository and check out the commit: - -1. Clone the NMT repository: - - .. code-block:: sh - - git clone https://github.com/tensorflow/nmt.git - -2. Check out the necessary commit: - - .. code-block:: sh - - git checkout b278487980832417ad8ac701c672b5c3dc7fa553 - - -**Step 2**. Get a trained model. You have two options: - -* Train the model with the GNMT ``wmt16_gnmt_4_layer.json`` or ``wmt16_gnmt_8_layer.json`` configuration file using the NMT framework. -* *Do not use the pre-trained checkpoints provided in the NMT repository, as they are outdated and can be incompatible with the current repository version.* - -This tutorial assumes the use of the trained GNMT model from ``wmt16_gnmt_4_layer.json`` config, German to English translation. - -**Step 3**. Create an inference graph: - -The OpenVINO assumes that a model is used for inference only. Hence, before converting the model into the IR, you need to transform the training graph into the inference graph. 
-For the GNMT model, the training graph and the inference graph have different decoders: the training graph uses a greedy search decoding algorithm, while the inference graph uses a beam search decoding algorithm. - -1. Apply the ``GNMT_inference.patch`` patch to the repository. `Create a Patch File <#Creating-a-Patch-File>`__ instructions if you do not have it: - - .. code-block:: sh - - git apply /path/to/patch/GNMT_inference.patch - - -2. Run the NMT framework to dump the inference model: - - .. code-block:: sh - - python -m nmt.nmt - --src=de - --tgt=en - --ckpt=/path/to/ckpt/translate.ckpt - --hparams_path=/path/to/repository/nmt/nmt/standard_hparams/wmt16_gnmt_4_layer.json - --vocab_prefix=/path/to/vocab/vocab.bpe.32000 - --out_dir="" - --dump_inference_model - --infer_mode beam_search - --path_to_dump /path/to/dump/model/ - - -If you use different checkpoints, use the corresponding values for the ``src``, ``tgt``, ``ckpt``, ``hparams_path``, and ``vocab_prefix`` parameters. -Inference checkpoint ``inference_GNMT_graph`` and frozen inference graph ``frozen_GNMT_inference_graph.pb`` will appear in the ``/path/to/dump/model/`` folder. - -To generate ``vocab.bpe.32000``, execute the ``nmt/scripts/wmt16_en_de.sh`` script. If you face an issue of a size mismatch between the checkpoint graph's embedding layer and vocabulary (both src and target), make sure you add the following code to the ``nmt.py`` file to the ``extend_hparams`` function after the line 508 (after initialization of the ``src_vocab_size`` and ``tgt_vocab_size`` variables): - -.. code-block:: py - :force: - - src_vocab_size -= 1 - tgt_vocab_size -= 1 - - -**Step 4**. Convert the model to the IR: - -.. 
code-block:: sh - - mo - --input_model /path/to/dump/model/frozen_GNMT_inference_graph.pb - --input "IteratorGetNext:1{i32}[1],IteratorGetNext:0{i32}[1,50],dynamic_seq2seq/hash_table_Lookup_1:0[1]->[2],dynamic_seq2seq/hash_table_Lookup:0[1]->[1]" - --output dynamic_seq2seq/decoder/decoder/GatherTree - --output_dir /path/to/output/IR/ - - -Input and output cutting with the ``--input`` and ``--output`` options is required since OpenVINO™ does not support ``IteratorGetNext`` and ``LookupTableFindV2`` operations. - -Input cutting: - -* ``IteratorGetNext`` operation iterates over a dataset. It is cut by output ports: port 0 contains data tensor with shape ``[batch_size, max_sequence_length]``, port 1 contains ``sequence_length`` for every batch with shape ``[batch_size]``. - -* ``LookupTableFindV2`` operations (``dynamic_seq2seq/hash_table_Lookup_1`` and ``dynamic_seq2seq/hash_table_Lookup`` nodes in the graph) are cut with constant values). - -Output cutting: - -* ``LookupTableFindV2`` operation is cut from the output and the ``dynamic_seq2seq/decoder/decoder/GatherTree`` node is treated as a new exit point. - -For more information about model cutting, refer to the :doc:`Cutting Off Parts of a Model <../../[legacy]-cutting-parts-of-a-model>` guide. - -Using a GNMT Model -################## - -.. note:: - - This step assumes you have converted a model to the Intermediate Representation. - -Inputs of the model: - -* ``IteratorGetNext/placeholder_out_port_0`` input with shape ``[batch_size, max_sequence_length]`` contains ``batch_size`` decoded input sentences. Every sentence is decoded the same way as indices of sentence elements in vocabulary and padded with index of ``eos`` (end of sentence symbol). If the length of the sentence is less than ``max_sequence_length``, remaining elements are filled with index of ``eos`` token. 
- -* ``IteratorGetNext/placeholder_out_port_1`` input with shape ``[batch_size]`` contains sequence lengths for every sentence from the first input. For example, if ``max_sequence_length = 50``, ``batch_size = 1`` and the sentence has only 30 elements, then the input tensor for ``IteratorGetNext/placeholder_out_port_1`` should be ``[30]``. - - -Outputs of the model: - -* ``dynamic_seq2seq/decoder/decoder/GatherTree`` tensor with shape ``[max_sequence_length * 2, batch, beam_size]``, - that contains ``beam_size`` best translations for every sentence from input (also decoded as indices of words in - vocabulary). - -.. note:: - The shape of this tensor in TensorFlow can be different: instead of ``max_sequence_length * 2``, it can be any value less than that, because OpenVINO does not support dynamic shapes of outputs, while TensorFlow can stop decoding iterations when ``eos`` symbol is generated. - -Running GNMT IR ---------------- - -1. With benchmark app: - - .. code-block:: sh - - benchmark_app -m -d CPU - - -2. With OpenVINO Runtime Python API: - - .. note:: - - Before running the example, insert a path to your GNMT ``.xml`` and ``.bin`` files into ``MODEL_PATH`` and ``WEIGHTS_PATH``, and fill ``input_data_tensor`` and ``seq_lengths`` tensors according to your input data. - - .. 
code-block:: py - :force: - - from openvino.inference_engine import IENetwork, IECore - - MODEL_PATH = '/path/to/IR/frozen_GNMT_inference_graph.xml' - WEIGHTS_PATH = '/path/to/IR/frozen_GNMT_inference_graph.bin' - - # Creating network - net = IENetwork( - model=MODEL_PATH, - weights=WEIGHTS_PATH) - - # Creating input data - input_data = {'IteratorGetNext/placeholder_out_port_0': input_data_tensor, - 'IteratorGetNext/placeholder_out_port_1': seq_lengths} - - # Creating plugin and loading extensions - ie = IECore() - ie.add_extension(extension_path="libcpu_extension.so", device_name="CPU") - - # Loading network - exec_net = ie.load_network(network=net, device_name="CPU") - - # Run inference - result_ie = exec_net.infer(input_data) - - -For more information about Python API, refer to the :doc:`OpenVINO Runtime Python API <../../../../../../api/ie_python_api/api>` guide. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-language-1b.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-language-1b.rst deleted file mode 100644 index 1b51809f9d1b6b..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-language-1b.rst +++ /dev/null @@ -1,131 +0,0 @@ -Converting a TensorFlow Language Model on One Billion Word Benchmark -==================================================================== - - -.. meta:: - :description: Learn how to convert a TensorFlow Language - Model on One Billion Word Benchmark to the OpenVINO Intermediate - Representation. - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -Downloading a Pre-trained Language Model on One Billion Word Benchmark -###################################################################### - -TensorFlow provides a pretrained `Language Model on One Billion Word Benchmark `__. - -To download the model for IR conversion, follow the instructions: - -1. Create new directory to store the model: - - .. code-block:: sh - - mkdir lm_1b - -2. Go to the ``lm_1b`` directory: - - .. code-block:: sh - - cd lm_1b - -3. Download the model GraphDef file: - - .. code-block:: sh - - wget http://download.tensorflow.org/models/LM_LSTM_CNN/graph-2016-09-10.pbtxt - -4. Create new directory to store 12 checkpoint shared files: - - .. code-block:: sh - - mkdir ckpt - -5. Go to the ``ckpt`` directory: - - .. code-block:: sh - - cd ckpt - -6. Download 12 checkpoint shared files: - - .. 
code-block:: sh - - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-base - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-char-embedding - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-lstm - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax0 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax1 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax2 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax3 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax4 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax5 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax6 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax7 - wget http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax8 - - -Once you have downloaded the pretrained model files, you will have the ``lm_1b`` directory with the following hierarchy: - -.. code-block:: sh - - lm_1b/ - graph-2016-09-10.pbtxt - ckpt/ - ckpt-base - ckpt-char-embedding - ckpt-lstm - ckpt-softmax0 - ckpt-softmax1 - ckpt-softmax2 - ckpt-softmax3 - ckpt-softmax4 - ckpt-softmax5 - ckpt-softmax6 - ckpt-softmax7 - ckpt-softmax8 - - - -.. image:: ../../../../../../assets/images/lm_1b.svg - -The frozen model still has two variables: ``Variable`` and ``Variable_1``. -It means that the model keeps training those variables at each inference. - -At the first inference of this graph, the variables are initialized by initial values. -After executing the ``lstm`` nodes, results of execution are assigned to these two variables. 
- -With each inference of the ``lm_1b`` graph, ``lstm`` initial states data is taken from previous inference -from variables, and states of current inference of ``lstm`` is reassigned to the same variables. - -It helps the model to remember the context of the words that it takes as input. - -Converting a TensorFlow Language Model on One Billion Word Benchmark to IR -########################################################################## - -Model Optimizer assumes that output model is for inference only. -Therefore, you should cut those variables off and resolve keeping cell and hidden states on application level. - -There is a certain limitation for the model conversion: the original model cannot be reshaped, so you should keep original shapes. - -To generate the ``lm_1b`` Intermediate Representation (IR), provide TensorFlow ``lm_1b`` model to the -Model Optimizer with parameters: - -.. code-block:: sh - - mo - --input_model lm_1b/graph-2016-09-10.pbtxt \ - --input_checkpoint lm_1b/ckpt \ - --input_model_is_text \ - --input_shape [50],[50],[1,9216],[1,9216] \ - --output softmax_out,lstm/lstm_0/concat_2,lstm/lstm_1/concat_2 \ - --input char_embedding/EmbeddingLookupUnique/Unique:0,char_embedding/EmbeddingLookupUnique/Unique:1,Variable/read,Variable_1/read - -Where: - -* ``--input char_embedding/EmbeddingLookupUnique/Unique:0,char_embedding/EmbeddingLookupUnique/Unique:1,Variable/read,Variable_1/read`` and ``--input_shape [50],[50],[1,9216],[1,9216]`` replace the variables with a placeholder. -* ``--output softmax_out,lstm/lstm_0/concat_2,lstm/lstm_1/concat_2`` specifies output node name and names of LSTM cell states. 
- diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-ncf.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-ncf.rst deleted file mode 100644 index a8592e75d65b31..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-ncf.rst +++ /dev/null @@ -1,68 +0,0 @@ -Converting a TensorFlow Neural Collaborative Filtering Model -============================================================ - - -.. meta:: - :description: Learn how to convert a Neural Collaborative - Filtering Model from TensorFlow to the OpenVINO Intermediate - Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This tutorial explains how to convert Neural Collaborative Filtering (NCF) model to the OpenVINO Intermediate Representation. - -`Public TensorFlow NCF model `__ does not contain pre-trained weights. To convert this model to the IR: - -1. Use `the instructions `__ from this repository to train the model. - -2. Freeze the inference graph you get in the previous step in ``model_dir``, following the instructions from the **Freezing Custom Models in Python** section of the :doc:`Converting a TensorFlow Model <../[legacy]-convert-tensorflow>` guide. 
- - Run the following commands: - - .. code-block:: py - :force: - - import tensorflow as tf - from tensorflow.python.framework import graph_io - - sess = tf.compat.v1.Session() - saver = tf.compat.v1.train.import_meta_graph("/path/to/model/model.meta") - saver.restore(sess, tf.train.latest_checkpoint('/path/to/model/')) - - frozen = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, \ - ["rating/BiasAdd"]) - graph_io.write_graph(frozen, './', 'inference_graph.pb', as_text=False) - - where ``rating/BiasAdd`` is an output node. - -3. Convert the model to the OpenVINO format. If you look at your frozen model, you can see that it has one input that is split into four ``ResourceGather`` layers. (Click image to zoom in.) - - .. image:: ../../../../../../assets/images/NCF_start.svg - - However, as the model conversion API does not support such data feeding, you should skip it. Cut - the edges incoming in ``ResourceGather`` port 1: - - .. code-block:: sh - - mo --input_model inference_graph.pb \ - --input 1:embedding/embedding_lookup,1:embedding_1/embedding_lookup, \ - 1:embedding_2/embedding_lookup,1:embedding_3/embedding_lookup \ - --input_shape [256],[256],[256],[256] \ - --output_dir - - In the ``input_shape`` parameter, 256 specifies the ``batch_size`` for your model. - -Alternatively, you can do steps 2 and 3 in one command line: - -.. 
code-block:: sh - - mo --input_meta_graph /path/to/model/model.meta \ - --input 1:embedding/embedding_lookup,1:embedding_1/embedding_lookup, \ - 1:embedding_2/embedding_lookup,1:embedding_3/embedding_lookup \ - --input_shape [256],[256],[256],[256] --output rating/BiasAdd \ - --output_dir - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection.rst deleted file mode 100644 index ad321a4abb3cda..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection.rst +++ /dev/null @@ -1,184 +0,0 @@ -Converting TensorFlow Object Detection API Models -================================================= - - -.. meta:: - :description: Learn how to convert Object Detection - API Models from TensorFlow to the OpenVINO Intermediate - Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -* Starting with the 2022.1 release, model conversion API can convert the TensorFlow Object Detection API Faster and Mask RCNNs topologies differently. By default, model conversion adds operation "Proposal" to the generated IR. 
This operation needs an additional input to the model with name "image_info" which should be fed with several values describing the preprocessing applied to the input image (refer to the :doc:`Proposal <../../../../../openvino-ir-format/operation-sets/operation-specs/detection/proposal-4>` operation specification for more information). However, this input is redundant for the models trained and inferred with equal size images. Model conversion API can generate IR for such models and insert operation :doc:`DetectionOutput <../../../../../openvino-ir-format/operation-sets/operation-specs/detection/detectionoutput-1>` instead of ``Proposal``. The `DetectionOutput` operation does not require additional model input "image_info". Moreover, for some models the produced inference results are closer to the original TensorFlow model. In order to trigger new behavior, the attribute "operation_to_add" in the corresponding JSON transformation configuration file should be set to value "DetectionOutput" instead of default one "Proposal". -* Starting with the 2021.1 release, model conversion API converts the TensorFlow Object Detection API SSDs, Faster and Mask RCNNs topologies keeping shape-calculating sub-graphs by default, so topologies can be re-shaped in the OpenVINO Runtime using dedicated reshape API. Refer to the :doc:`Using Shape Inference <../../../../../../openvino-workflow/running-inference/changing-input-shape>` guide for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size. -* To generate IRs for TF 1 SSD topologies, model conversion API creates a number of ``PriorBoxClustered`` operations instead of a constant node with prior boxes calculated for the particular input image size. This change allows you to reshape the topology in the OpenVINO Runtime using dedicated API. 
The reshaping is supported for all SSD topologies except FPNs, which contain hardcoded shapes for some operations preventing from changing topology input shape. - -Converting a Model -################## - -You can download TensorFlow Object Detection API models from the `TensorFlow 1 Detection Model Zoo `__ or `TensorFlow 2 Detection Model Zoo `__. - -.. note:: - - Before converting, make sure you have configured model conversion API. For configuration steps, refer to the :doc:`Convert a Model <../../../legacy-conversion-api>`. - -To convert a TensorFlow Object Detection API model, run the ``mo`` command with the following required parameters: - -* ``input_model `` - File with a pretrained model (binary or text .pb file after freezing) OR ``saved_model_dir `` for the TensorFlow 2 models -* ``transformations_config `` - A subgraph replacement configuration file with transformations description. For the models downloaded from the TensorFlow Object Detection API zoo, you can find the configuration files in the ``/openvino/tools/mo/front/tf`` directory. 
Use: - - * ``ssd_v2_support.json`` - for frozen SSD topologies from the models zoo version up to 1.13.X inclusively - * ``ssd_support_api_v.1.14.json`` - for SSD topologies trained using the TensorFlow Object Detection API version 1.14 up to 1.14.X inclusively - * ``ssd_support_api_v.1.15.json`` - for SSD topologies trained using the TensorFlow Object Detection API version 1.15 up to 2.0 - * ``ssd_support_api_v.2.0.json`` - for SSD topologies trained using the TensorFlow Object Detection API version 2.0 up to 2.3.X inclusively - * ``ssd_support_api_v.2.4.json`` - for SSD topologies trained using the TensorFlow Object Detection API version 2.4 or higher - * ``efficient_det_support_api_v.2.0.json`` - for EfficientDet topologies trained using the TensorFlow Object Detection API version 2.0 up to 2.3.X inclusively - * ``efficient_det_support_api_v.2.4.json`` - for EfficientDet topologies trained using the TensorFlow Object Detection API version 2.4 or higher - * ``faster_rcnn_support.json`` - for Faster R-CNN topologies from the TF 1.X models zoo trained with TensorFlow version up to 1.6.X inclusively - * ``faster_rcnn_support_api_v1.7.json`` - for Faster R-CNN topologies trained using the TensorFlow Object Detection API version 1.7.0 up to 1.9.X inclusively - * ``faster_rcnn_support_api_v1.10.json`` - for Faster R-CNN topologies trained using the TensorFlow Object Detection API version 1.10.0 up to 1.12.X inclusively - * ``faster_rcnn_support_api_v1.13.json`` - for Faster R-CNN topologies trained using the TensorFlow Object Detection API version 1.13.X - * ``faster_rcnn_support_api_v1.14.json`` - for Faster R-CNN topologies trained using the TensorFlow Object Detection API version 1.14.0 up to 1.14.X inclusively - * ``faster_rcnn_support_api_v1.15.json`` - for Faster R-CNN topologies trained using the TensorFlow Object Detection API version 1.15.0 up to 2.0 - * ``faster_rcnn_support_api_v2.0.json`` - for Faster R-CNN topologies trained using the TensorFlow Object 
Detection API version 2.0 up to 2.3.X inclusively - * ``faster_rcnn_support_api_v2.4.json`` - for Faster R-CNN topologies trained using the TensorFlow Object Detection API version 2.4 or higher - * ``mask_rcnn_support.json`` - for Mask R-CNN topologies from the TF 1.X models zoo trained with TensorFlow version 1.9.0 or lower. - * ``mask_rcnn_support_api_v1.7.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 1.7.0 up to 1.9.X inclusively - * ``mask_rcnn_support_api_v1.11.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 1.11.0 up to 1.12.X inclusively - * ``mask_rcnn_support_api_v1.13.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 1.13.0 up to 1.13.X inclusively - * ``mask_rcnn_support_api_v1.14.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 1.14.0 up to 1.14.X inclusively - * ``mask_rcnn_support_api_v1.15.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 1.15.0 up to 2.0 - * ``mask_rcnn_support_api_v2.0.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 2.0 up to 2.3.X inclusively - * ``mask_rcnn_support_api_v2.4.json`` - for Mask R-CNN topologies trained using the TensorFlow Object Detection API version 2.4 or higher - * ``rfcn_support.json`` - for RFCN topology from the models zoo trained with TensorFlow version up to 1.9.X inclusively - * ``rfcn_support_api_v1.10.json`` - for RFCN topology from the models zoo frozen with TensorFlow version 1.10.0 up to 1.12.X inclusively - * ``rfcn_support_api_v1.13.json`` - for RFCN topology from the models zoo frozen with TensorFlow version 1.13.X - * ``rfcn_support_api_v1.14.json`` - for RFCN topology from the models zoo frozen with TensorFlow version 1.14.0 or higher - -* ``tensorflow_object_detection_api_pipeline_config `` - A special configuration file that 
describes the topology hyper-parameters and structure of the TensorFlow Object Detection API model. For the models downloaded from the TensorFlow Object Detection API zoo, the configuration file is named ``pipeline.config``. If you plan to train a model yourself, you can find templates for these files in the `models repository `__. -* ``input_shape`` (optional) - A custom input image shape. For more information how the ``input_shape`` parameter is handled for the TensorFlow Object Detection API models, refer to the `Custom Input Shape <#Custom-Input-Shape>`__ guide. - -.. note:: - - The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the ``RGB<->BGR`` conversion specifying the command-line parameter: ``reverse_input_channels``. Otherwise, inference results may be incorrect. If you convert a TensorFlow Object Detection API model to use with the OpenVINO sample applications, you must specify the ``reverse_input_channels`` parameter. For more information about the parameter, refer to the **When to Reverse Input Channels** section of the :doc:`Converting a Model to Intermediate Representation (IR) <../../[legacy]-setting-input-shapes>` guide. - -Additionally to the mandatory parameters listed above you can use optional conversion parameters if needed. A full list of parameters is available in the :doc:`Converting a TensorFlow Model <../[legacy]-convert-tensorflow>` guide. - -For example, if you downloaded the pre-trained `SSD InceptionV2 topology `__ and extracted archive to the directory ``/tmp/ssd_inception_v2_coco_2018_01_28``, the sample command line to convert the model looks as follows: - -.. 
code-block:: sh - - mo --input_model=/tmp/ssd_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --transformations_config front/tf/ssd_v2_support.json --tensorflow_object_detection_api_pipeline_config /tmp/ssd_inception_v2_coco_2018_01_28/pipeline.config --reverse_input_channels - - -OpenVINO™ Toolkit Samples and Open Model Zoo Demos -################################################## - -OpenVINO comes with a number of samples to demonstrate use of OpenVINO Runtime API. Additionally, -Open Model Zoo provides set of demo applications to show implementation of close to real life applications, -based on deep learning in various tasks, including Image Classification, Visual Object Detection, Text Recognition, -Speech Recognition, Natural Language Processing and others. Refer to the links below for more details. - -* :doc:`OpenVINO Samples <../../../../../../learn-openvino/openvino-samples>` -* :doc:`Open Model Zoo Demos <../../../../model-zoo>` - -.. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - -Feeding Input Images to the Samples -################################### - -There are several important notes about feeding input images to the samples: - -1. OpenVINO samples stretch input image to the size of the input operation without preserving aspect ratio. This behavior is usually correct for most topologies (including SSDs), but incorrect for other models like Faster R-CNN, Mask R-CNN and R-FCN. These models usually use keeps aspect ratio resizer. The type of preprocessing is defined in the pipeline configuration file in the section ``image_resizer``. If keeping aspect ratio is used, then it is necessary to resize image before passing it to the sample and optionally pad the resized image with 0s (if the attribute "pad_to_max_dimension" in the pipeline.config is equal to "true"). - -2. 
TensorFlow implementation of image resize may be different from the one implemented in the sample. Even reading input image from compressed format (like ``.jpg``) could give different results in the sample and TensorFlow. If it is necessary to compare accuracy between the TensorFlow and the OpenVINO, it is recommended to pass pre-resized input image in a non-compressed format (like ``.bmp``). - -3. If you want to infer the model with the OpenVINO samples, convert the model specifying the ``reverse_input_channels`` command line parameter. The samples load images in BGR channels order, while TensorFlow models were trained with images in RGB order. When the ``reverse_input_channels`` command line parameter is specified, model conversion API performs first convolution or other channel dependent operation weights modification so the output will be like the image is passed with RGB channels order. - -4. Read carefully the messages printed by model conversion API. They contain important instructions on how to prepare input data before running the inference and how to interpret the output. - -Custom Input Shape -################## - -Model conversion handles the command line parameter ``input_shape`` for TensorFlow Object Detection API models in a special way depending on the image resizer type defined in the ``pipeline.config`` file. TensorFlow Object Detection API generates different ``Preprocessor`` sub-graph based on the image resizer type. Model conversion API supports two types of image resizer: - -* ``fixed_shape_resizer`` --- *Stretches* input image to the specific height and width. The ``pipeline.config`` snippet below shows a ``fixed_shape_resizer`` sample definition: - - .. code-block:: sh - - image_resizer { - fixed_shape_resizer { - height: 300 - width: 300 - } - } - -* ``keep_aspect_ratio_resizer`` --- Resizes the input image *keeping aspect ratio* to satisfy the minimum and maximum size constraints. 
The ``pipeline.config`` snippet below shows a ``keep_aspect_ratio_resizer`` sample definition: - - .. code-block:: sh - - image_resizer { - keep_aspect_ratio_resizer { - min_dimension: 600 - max_dimension: 1024 - } - } - -If an additional parameter "pad_to_max_dimension" is equal to "true", then the resized image will be padded with 0s to the square image of size "max_dimension". - -Fixed Shape Resizer Replacement -+++++++++++++++++++++++++++++++ - -* If the ``input_shape`` command line parameter is not specified, model conversion generates an input operation with the height and width as defined in the ``pipeline.config``. - -* If the ``input_shape [1, H, W, 3]`` command line parameter is specified, model conversion sets the input operation height to ``H`` and width to ``W`` and convert the model. However, the conversion may fail because of the following reasons: - - * The model is not reshape-able, meaning that it's not possible to change the size of the model input image. For example, SSD FPN models have ``Reshape`` operations with hard-coded output shapes, but the input size to these ``Reshape`` instances depends on the input image size. In this case, model conversion API shows an error during the shape inference phase. Run model conversion with ``log_level DEBUG`` to see the inferred operations output shapes to see the mismatch. - * Custom input shape is too small. For example, if you specify ``input_shape [1,100,100,3]`` to convert a SSD Inception V2 model, one of convolution or pooling nodes decreases input tensor spatial dimensions to non-positive values. In this case, model conversion API shows error message like this: '[ ERROR ] Shape [ 1 -1 -1 256] is not fully defined for output X of "node_name".' 
- - -Keeping Aspect Ratio Resizer Replacement -++++++++++++++++++++++++++++++++++++++++ - -* If the ``input_shape`` command line parameter is not specified, model conversion API generates an input operation with both height and width equal to the value of parameter ``min_dimension`` in the ``keep_aspect_ratio_resizer``. - -* If the ``input_shape [1, H, W, 3]`` command line parameter is specified, model conversion API scales the specified input image height ``H`` and width ``W`` to satisfy the ``min_dimension`` and ``max_dimension`` constraints defined in the ``keep_aspect_ratio_resizer``. The following function calculates the input operation height and width: - - .. code-block:: py - :force: - - def calculate_shape_keeping_aspect_ratio(H: int, W: int, min_dimension: int, max_dimension: int): - ratio_min = min_dimension / min(H, W) - ratio_max = max_dimension / max(H, W) - ratio = min(ratio_min, ratio_max) - return int(round(H * ratio)), int(round(W * ratio)) - -The ``input_shape`` command line parameter should be specified only if the "pad_to_max_dimension" does not exist of is set to "false" in the ``keep_aspect_ratio_resizer``. - -Models with ``keep_aspect_ratio_resizer`` were trained to recognize object in real aspect ratio, in contrast with most of the classification topologies trained to recognize objects stretched vertically and horizontally as well. By default, topologies are converted with ``keep_aspect_ratio_resizer`` to consume a square input image. If the non-square image is provided as input, it is stretched without keeping aspect ratio that results to object detection quality decrease. - -.. note:: - - It is highly recommended to specify the ``input_shape`` command line parameter for the models with ``keep_aspect_ratio_resizer``, if the input image dimensions are known in advance. 
- -Model Conversion Process in Detail -################################## - -This section is intended for users who want to understand how model conversion API performs Object Detection API models conversion in details. The information in this section is also useful for users having complex models that are not converted with model conversion API out of the box. It is highly recommended to read the **Graph Transformation Extensions** section in the :doc:`[Legacy] Model Optimizer Extensibility <../../../legacy-model-optimizer-extensibility>` documentation first to understand sub-graph replacement concepts which are used here. - -It is also important to open the model in the `TensorBoard `__ to see the topology structure. Model conversion API can create an event file that can be then fed to the TensorBoard tool. Run model conversion, providing two command line parameters: - -* ``input_model `` --- Path to the frozen model. -* ``tensorboard_logdir`` --- Path to the directory where TensorBoard looks for the event files. - -Implementation of the transformations for Object Detection API models is located in the `file `__. Refer to the code in this file to understand the details of the conversion process. 
- - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-retina-net.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-retina-net.rst deleted file mode 100644 index db2c6424367f58..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-retina-net.rst +++ /dev/null @@ -1,31 +0,0 @@ -Converting a TensorFlow RetinaNet Model -======================================= - - -.. meta:: - :description: Learn how to convert a RetinaNet model - from TensorFlow to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python ../../../../../../learn-openvino/interactive-tutorials-python <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This tutorial explains how to convert a RetinaNet model to the Intermediate Representation (IR). - -`Public RetinaNet model `__ does not contain pretrained TensorFlow weights. -To convert this model to the TensorFlow format, follow the `Reproduce Keras to TensorFlow Conversion tutorial `__. - -After converting the model to TensorFlow format, run the following command: - -.. 
code-block:: sh - - mo --input "input_1[1,1333,1333,3]" --input_model retinanet_resnet50_coco_best_v2.1.0.pb --transformations_config front/tf/retinanet.json - - -Where ``transformations_config`` command-line parameter specifies the configuration json file containing model conversion hints for model conversion API. -The json file contains some parameters that need to be changed if you train the model yourself. It also contains information on how to match endpoints -to replace the subgraph nodes. After the model is converted to the OpenVINO IR format, the output nodes will be replaced with DetectionOutput layer. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-slim-library.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-slim-library.rst deleted file mode 100644 index 847d44fce813b1..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-slim-library.rst +++ /dev/null @@ -1,117 +0,0 @@ -Converting TensorFlow Slim Image Classification Model Library Models -==================================================================== - - -.. meta:: - :description: Learn how to convert a Slim Image - Classification model from TensorFlow to the OpenVINO - Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -`TensorFlow-Slim Image Classification Model Library `__ is a library to define, train and evaluate classification models in TensorFlow. The library contains Python scripts defining the classification topologies together with checkpoint files for several pre-trained classification topologies. To convert a TensorFlow-Slim library model, complete the following steps: - -1. Download the TensorFlow-Slim models `git repository `__. -2. Download the pre-trained model `checkpoint `__. -3. Export the inference graph. -4. Convert the model using model conversion API. - -The `Example of an Inception V1 Model Conversion <#example_of_an_inception_v1_model_conversion>`__ below illustrates the process of converting an Inception V1 Model. - -Example of an Inception V1 Model Conversion -########################################### - -This example demonstrates how to convert the model on Linux OSes, but it could be easily adopted for the Windows OSes. - -**Step 1**. Create a new directory to clone the TensorFlow-Slim git repository to: - -.. code-block:: sh - - mkdir tf_models - -.. code-block:: sh - - git clone https://github.com/tensorflow/models.git tf_models - - -**Step 2**. Download and unpack the `Inception V1 model checkpoint file `__: - -.. code-block:: sh - - wget http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz - -.. code-block:: sh - - tar xzvf inception_v1_2016_08_28.tar.gz - -**Step 3**. Export the inference graph --- the protobuf file (``.pb``) containing the architecture of the topology. This file *does not* contain the neural network weights and cannot be used for inference. - -.. 
code-block:: sh - - python3 tf_models/research/slim/export_inference_graph.py \ - --model_name inception_v1 \ - --output_file inception_v1_inference_graph.pb - - -Model conversion API comes with the summarize graph utility, which identifies graph input and output nodes. Run the utility to determine input/output nodes of the Inception V1 model: - -.. code-block:: sh - - python3 /openvino/tools/mo/utils/summarize_graph.py --input_model ./inception_v1_inference_graph.pb - -The output looks as follows: - -.. code-block:: sh - - 1 input(s) detected: - Name: input, type: float32, shape: (-1,224,224,3) - 1 output(s) detected: - InceptionV1/Logits/Predictions/Reshape_1 - -The tool finds one input node with name ``input``, type ``float32``, fixed image size ``(224,224,3)`` and undefined batch size ``-1``. The output node name is ``InceptionV1/Logits/Predictions/Reshape_1``. - -**Step 4**. Convert the model with the model conversion API: - -.. code-block:: sh - - mo --input_model ./inception_v1_inference_graph.pb --input_checkpoint ./inception_v1.ckpt -b 1 --mean_value [127.5,127.5,127.5] --scale 127.5 - - -The ``-b`` command line parameter is required because model conversion API cannot convert a model with undefined input size. - -For the information on why ``--mean_values`` and ``--scale`` command-line parameters are used, refer to the `Mean and Scale Values for TensorFlow-Slim Models <#Mean-and-Scale-Values-for-TensorFlow-Slim-Models>`__. - -Mean and Scale Values for TensorFlow-Slim Models -################################################# - -The TensorFlow-Slim Models were trained with normalized input data. There are several different normalization algorithms used in the Slim library. OpenVINO classification sample does not perform image pre-processing except resizing to the input layer size. It is necessary to pass mean and scale values to model conversion API so they are embedded into the generated IR in order to get correct classification results. 
- -The file `preprocessing_factory.py `__ contains a dictionary variable ``preprocessing_fn_map`` defining mapping between the model type and pre-processing function to be used. The function code should be analyzed to figure out the mean/scale values. - -The `inception_preprocessing.py `__ file defines the pre-processing function for the Inception models. The ``preprocess_for_eval`` function contains the following code: - -.. code-block:: py - :force: - - ... - import tensorflow as tf - if image.dtype != tf.float32: - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - ... - image = tf.subtract(image, 0.5) - image = tf.multiply(image, 2.0) - return image - - -Firstly, the ``image`` is converted to data type `tf.float32` and the values in the tensor are scaled to the ``[0, 1]`` range using the `tf.image.convert_image_dtype `__ function. Then the ``0.5`` is subtracted from the image values and values multiplied by ``2.0``. The final image range of values is ``[-1, 1]``. - -OpenVINO classification sample reads an input image as a three-dimensional array of integer values from the range ``[0, 255]``. In order to scale them to ``[-1, 1]`` range, the mean value ``127.5`` for each image channel should be specified as well as a scale factor ``127.5``. - -Similarly, the mean/scale values can be determined for other Slim models. - -The exact mean/scale values are defined in the table with list of supported TensorFlow-Slim models at the :doc:`Converting a TensorFlow Model <../[legacy]-convert-tensorflow>` guide. 
- diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-wide-and-deep-family.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-wide-and-deep-family.rst deleted file mode 100644 index d2f83fa12d8e67..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-wide-and-deep-family.rst +++ /dev/null @@ -1,166 +0,0 @@ -Converting TensorFlow Wide and Deep Family Models -================================================= - - -.. meta:: - :description: Learn how to convert Wide and Deep Family - models from TensorFlow to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -The Wide and Deep model is a combination of wide and deep parts for memorization and generalization of object features respectively. -These models can contain different types of object features such as numerical, categorical, sparse and sequential features. These feature types are specified -through the TensorFlow ``tf.feature_column`` API. The table below presents which feature types are supported by the OpenVINO toolkit. - -.. 
list-table:: - :header-rows: 1 - - * - numeric - - (weighted) categorical - - categorical with hash - - bucketized - - sequential - - crossed - * - yes - - yes - - no - - yes - - yes - - no - - -.. note:: The categorical with hash and crossed features are currently unsupported since OpenVINO does not cover tensors of the `string` type and operations with them. - -Preparing an Example of Wide and Deep Model -########################################### - -**Step 1**. Clone the GitHub repository with TensorFlow models and move to the directory with an example of Wide and Deep model: - -.. code-block:: sh - - git clone https://github.com/tensorflow/models.git --branch r2.2.0; - cd official/r1/wide_deep - - -The Wide and Deep model is no longer in the master branch of the repository but is still available in the r2.2.0 branch. - - -**Step 2**. Train the model - -As the OpenVINO™ toolkit does not support the categorical with hash and crossed features, such feature types must be switched off in the model -by changing the ``build_model_columns()`` function in `census_dataset.py` as follows: - -.. 
code-block:: py - :force: - - def build_model_columns(): - """Builds a set of wide and deep feature columns.""" - # Continuous variable columns - age = tf.feature_column.numeric_column('age') - education_num = tf.feature_column.numeric_column('education_num') - capital_gain = tf.feature_column.numeric_column('capital_gain') - capital_loss = tf.feature_column.numeric_column('capital_loss') - hours_per_week = tf.feature_column.numeric_column('hours_per_week') - education = tf.feature_column.categorical_column_with_vocabulary_list( - 'education', [ - 'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college', - 'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school', - '5th-6th', '10th', '1st-4th', 'Preschool', '12th']) - marital_status = tf.feature_column.categorical_column_with_vocabulary_list( - 'marital_status', [ - 'Married-civ-spouse', 'Divorced', 'Married-spouse-absent', - 'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed']) - relationship = tf.feature_column.categorical_column_with_vocabulary_list( - 'relationship', [ - 'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried', - 'Other-relative']) - workclass = tf.feature_column.categorical_column_with_vocabulary_list( - 'workclass', [ - 'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov', - 'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked']) - # To show an example of hashing: - #occupation = tf.feature_column.categorical_column_with_hash_bucket( - # 'occupation', hash_bucket_size=_HASH_BUCKET_SIZE) - # Transformations. - age_buckets = tf.feature_column.bucketized_column( - age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) - # Wide columns and deep columns. 
- base_columns = [ - education, marital_status, relationship, workclass, - age_buckets, - ] - crossed_columns = [] - wide_columns = base_columns + crossed_columns - deep_columns = [ - age, - education_num, - capital_gain, - capital_loss, - hours_per_week, - tf.feature_column.indicator_column(workclass), - tf.feature_column.indicator_column(education), - tf.feature_column.indicator_column(marital_status), - tf.feature_column.indicator_column(relationship), - # To show an example of embedding - ] - return wide_columns, deep_columns - -After that, start training with the following command: - -.. code-block:: sh - - python census_main.py - - -Converting the Wide and Deep Model to IR -######################################## - -Use the following command line to convert the saved model file with the checkpoint: - -.. code-block:: sh - - mo - --input_checkpoint checkpoint --input_meta_graph model.ckpt.meta - --input "IteratorGetNext:0[2], - IteratorGetNext:1[2], - IteratorGetNext:2[2], - IteratorGetNext:4[2], - IteratorGetNext:7[2], - linear/linear_model/linear_model/linear_model/education/to_sparse_input/indices:0[10,2]{i64}, - linear/linear_model/linear_model/linear_model/education/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - linear/linear_model/linear_model/linear_model/education/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - linear/linear_model/linear_model/linear_model/marital_status/to_sparse_input/indices:0[10,2]{i64}, - linear/linear_model/linear_model/linear_model/marital_status/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - linear/linear_model/linear_model/linear_model/marital_status/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - linear/linear_model/linear_model/linear_model/relationship/to_sparse_input/indices:0[10,2]{i64}, - linear/linear_model/linear_model/linear_model/relationship/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - linear/linear_model/linear_model/linear_model/relationship/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - 
linear/linear_model/linear_model/linear_model/workclass/to_sparse_input/indices:0[10,2]{i64}, - linear/linear_model/linear_model/linear_model/workclass/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - linear/linear_model/linear_model/linear_model/workclass/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - dnn/input_from_feature_columns/input_layer/education_indicator/to_sparse_input/indices:0[10,2]{i64}, - dnn/input_from_feature_columns/input_layer/education_indicator/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - dnn/input_from_feature_columns/input_layer/education_indicator/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - dnn/input_from_feature_columns/input_layer/marital_status_indicator/to_sparse_input/indices:0[10,2]{i64}, - dnn/input_from_feature_columns/input_layer/marital_status_indicator/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - dnn/input_from_feature_columns/input_layer/marital_status_indicator/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - dnn/input_from_feature_columns/input_layer/relationship_indicator/to_sparse_input/indices:0[10,2]{i64}, - dnn/input_from_feature_columns/input_layer/relationship_indicator/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - dnn/input_from_feature_columns/input_layer/relationship_indicator/to_sparse_input/dense_shape:0[2]{i64}->[2,50], - dnn/input_from_feature_columns/input_layer/workclass_indicator/to_sparse_input/indices:0[10,2]{i64}, - dnn/input_from_feature_columns/input_layer/workclass_indicator/hash_table_Lookup/LookupTableFindV2:0[10]{i64}, - dnn/input_from_feature_columns/input_layer/workclass_indicator/to_sparse_input/dense_shape:0[2]{i64}->[2,50]" - --output head/predictions/probabilities - - -The model contains operations unsupported by the OpenVINO™ toolkit such as ``IteratorGetNext`` and ``LookupTableFindV2``, so the Model Optimizer must prune these nodes. -The pruning is specified through `--input` option. The prunings for ``IteratorGetNext:*`` nodes correspond to numeric features. 
-The pruning for each categorical feature consists of three prunings for the following nodes: ``*/to_sparse_input/indices:0``, ``*/hash_table_Lookup/LookupTableFindV2:0``, and ``*/to_sparse_input/dense_shape:0``. - -The above command line generates an OpenVINO model for a batch of two objects, with the total number of actual categorical feature values equal to 10 and maximum size of a sparse categorical feature for one object equal to 50. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-xlnet.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-xlnet.rst deleted file mode 100644 index 853614de85feed..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-xlnet.rst +++ /dev/null @@ -1,208 +0,0 @@ -Converting a TensorFlow XLNet Model -=================================== - - -.. meta:: - :description: Learn how to convert an XLNet model from - TensorFlow to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -Pretrained models for XLNet (a generalized autoregressive pretraining method for language understanding) are -`publicly available `__. 
- -Supported Models -################ - -The following models from the pretrained `XLNet model list `__ are currently supported: - -* `XLNet-Large, Cased `__ -* `XLNet-Base, Cased `__ - -Downloading the Pretrained Base XLNet Model -########################################### - -Download and unzip an archive with the `XLNet-Base, Cased `__. - -After the archive is unzipped, the directory ``cased_L-12_H-768_A-12`` is created and contains the following files: - -* TensorFlow checkpoint (``xlnet_model.ckpt``), containing the pretrained weights (which is actually 3 files) -* sentence piece model (``spiece.model``) used for (de)tokenization -* config file (``xlnet_config.json``), which specifies the hyperparameters of the model - -To get pb-file from the archive contents, you need to do the following. - -1. Run commands - - .. code-block:: sh - - cd ~ - mkdir XLNet-Base - cd XLNet-Base - git clone https://github.com/zihangdai/xlnet - wget https://storage.googleapis.com/xlnet/released_models/cased_L-12_H-768_A-12.zip - unzip cased_L-12_H-768_A-12.zip - mkdir try_save - - -2. Save and run the following Python script in `~/XLNet-Base/xlnet`: - - .. note:: The original model repository has been tested with TensorFlow 1.13.1 under Python2. - - .. 
code-block:: py - :force: - - from collections import namedtuple - - import tensorflow as tf - from tensorflow.python.framework import graph_io - - import model_utils - import xlnet - - LENGTHS = 50 - BATCH = 1 - OUTPUT_DIR = '~/XLNet-Base/try_save/' - INIT_CKPT_PATH = '~/XLNet-Base/xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt' - XLNET_CONFIG_PATH = '~/XLNet-Base/xlnet_cased_L-12_H-768_A-12/xlnet_config.json' - - FLags = namedtuple('FLags', 'use_tpu init_checkpoint') - FLAGS = FLags(use_tpu=False, init_checkpoint=INIT_CKPT_PATH) - - xlnet_config = xlnet.XLNetConfig(json_path=XLNET_CONFIG_PATH) - run_config = xlnet.RunConfig(is_training=False, use_tpu=False, use_bfloat16=False, dropout=0.1, dropatt=0.1,) - - - sentence_features_input_idx = tf.compat.v1.placeholder(tf.int32, shape=[LENGTHS, BATCH], name='input_ids') - sentence_features_segment_ids = tf.compat.v1.placeholder(tf.int32, shape=[LENGTHS, BATCH], name='seg_ids') - sentence_features_input_mask = tf.compat.v1.placeholder(tf.float32, shape=[LENGTHS, BATCH], name='input_mask') - - with tf.compat.v1.Session() as sess: - xlnet_model = xlnet.XLNetModel(xlnet_config=xlnet_config, run_config=run_config, - input_ids=sentence_features_input_idx, - seg_ids=sentence_features_segment_ids, - input_mask=sentence_features_input_mask) - - sess.run(tf.compat.v1.global_variables_initializer()) - model_utils.init_from_checkpoint(FLAGS, True) - - # Save the variables to disk. 
- saver = tf.compat.v1.train.Saver() - - # Saving checkpoint - save_path = saver.save(sess, OUTPUT_DIR + "model.ckpt") - - # Freezing model - outputs = ['model/transformer/dropout_2/Identity'] - graph_def_freezed = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), outputs) - - # Saving non-frozen and frozen model to pb - graph_io.write_graph(sess.graph.as_graph_def(), OUTPUT_DIR, 'model.pb', as_text=False) - graph_io.write_graph(graph_def_freezed,OUTPUT_DIR, 'model_frozen.pb', - as_text=False) - - # Write to tensorboard - with tf.compat.v1.summary.FileWriter(logdir=OUTPUT_DIR, graph_def=graph_def_freezed) as writer: - writer.flush() - -Downloading the Pretrained Large XLNet Model -############################################ - -Download and unzip an archive with the `XLNet-Large, Cased `__. - -After unzipping the archive, the directory ``cased_L-24_H-1024_A-16`` is created and contains the following files: - -* TensorFlow checkpoint (``xlnet_model.ckpt``) containing the pretrained weights (which is actually 3 files) -* sentence piece model (``spiece.model``) used for (de)tokenization -* config file (``xlnet_config.json``) which specifies the hyperparameters of the model - -To get ``pb-file`` from the archive contents, follow the instructions below: - -1. Run commands - - .. code-block:: sh - - cd ~ - mkdir XLNet-Large - cd XLNet-Large - git clone https://github.com/zihangdai/xlnet - wget https://storage.googleapis.com/xlnet/released_models/cased_L-24_H-1024_A-16.zip - unzip cased_L-24_H-1024_A-16.zip - mkdir try_save - - -2. Save and run the following Python script in ``~/XLNet-Large/xlnet``: - - .. 
code-block:: py - :force: - - from collections import namedtuple - - import tensorflow as tf - from tensorflow.python.framework import graph_io - - import model_utils - import xlnet - - LENGTHS = 50 - BATCH = 1 - OUTPUT_DIR = '~/XLNet-Large/try_save' - INIT_CKPT_PATH = '~/XLNet-Large/cased_L-24_H-1024_A-16/xlnet_model.ckpt' - XLNET_CONFIG_PATH = '~/XLNet-Large/cased_L-24_H-1024_A-16/xlnet_config.json' - - FLags = namedtuple('FLags', 'use_tpu init_checkpoint') - FLAGS = FLags(use_tpu=False, init_checkpoint=INIT_CKPT_PATH) - - xlnet_config = xlnet.XLNetConfig(json_path=XLNET_CONFIG_PATH) - run_config = xlnet.RunConfig(is_training=False, use_tpu=False, use_bfloat16=False, dropout=0.1, dropatt=0.1,) - - - sentence_features_input_idx = tf.compat.v1.placeholder(tf.int32, shape=[LENGTHS, BATCH], name='input_ids') - sentence_features_segment_ids = tf.compat.v1.placeholder(tf.int32, shape=[LENGTHS, BATCH], name='seg_ids') - sentence_features_input_mask = tf.compat.v1.placeholder(tf.float32, shape=[LENGTHS, BATCH], name='input_mask') - - with tf.compat.v1.Session() as sess: - xlnet_model = xlnet.XLNetModel(xlnet_config=xlnet_config, run_config=run_config, - input_ids=sentence_features_input_idx, - seg_ids=sentence_features_segment_ids, - input_mask=sentence_features_input_mask) - - sess.run(tf.compat.v1.global_variables_initializer()) - model_utils.init_from_checkpoint(FLAGS, True) - - # Save the variables to disk. 
- saver = tf.compat.v1.train.Saver() - - # Saving checkpoint - save_path = saver.save(sess, OUTPUT_DIR + "model.ckpt") - - # Freezing model - outputs = ['model/transformer/dropout_2/Identity'] - graph_def_freezed = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), outputs) - - # Saving non-frozen and frozen model to pb - graph_io.write_graph(sess.graph.as_graph_def(), OUTPUT_DIR, 'model.pb', as_text=False) - graph_io.write_graph(graph_def_freezed,OUTPUT_DIR, 'model_frozen.pb', - as_text=False) - - # Write to tensorboard - with tf.compat.v1.summary.FileWriter(logdir=OUTPUT_DIR, graph_def=graph_def_freezed) as writer: - writer.flush() - - -The script should save into ``~/XLNet-Large/xlnet``. - -Converting a frozen TensorFlow XLNet Model to IR -################################################# - -To generate the XLNet Intermediate Representation (IR) of the model, run model conversion with the following parameters: - -.. code-block:: sh - - mo --input_model path-to-model/model_frozen.pb \ - --input "input_mask[50,1],input_ids[50,1],seg_ids[50,1]" - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-yolo.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-yolo.rst deleted file mode 100644 index e7e8072b1bda05..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-yolo.rst +++ /dev/null @@ -1,322 +0,0 @@ -Converting TensorFlow YOLO Models -================================= - - -.. meta:: - :description: Learn how to convert YOLO models from - TensorFlow to the OpenVINO Intermediate Representation. - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials <../../../../../../learn-openvino/interactive-tutorials-python>`. - -This document explains how to convert real-time object detection YOLOv1, YOLOv2, YOLOv3 and YOLOv4 public models to the Intermediate Representation (IR). All YOLO models are originally implemented in the DarkNet framework and consist of two files: - -* The ``.cfg`` file with model configurations -* The ``.weights`` file with model weights - -Depending on a YOLO model version, the ``convert_model()`` method converts it differently: - -- YOLOv4 must be first converted from Keras to TensorFlow 2. -- YOLOv3 has several implementations. This tutorial uses a TensorFlow implementation of YOLOv3 model, which can be directly converted to an IR. -- YOLOv1 and YOLOv2 models must be first converted to TensorFlow using DarkFlow. - -Converting a YOLOv4 Model to IR -############################### - -This section explains how to convert the YOLOv4 Keras model from the `repository `__ to an IR. To convert the YOLOv4 model, follow the instructions below: - -1. Download YOLOv4 weights and associated with it cfg file: - - - for YOLOv4 ( `weights `__ / `config file `__ ) - - for YOLOv4-tiny ( `weights `__ / `config file `__ ) - -2. Clone the repository with the YOLOv4 model: - - .. code-block:: sh - - git clone https://github.com/david8862/keras-YOLOv3-model-set - - -3. Convert the model to the TensorFlow 2 format: - - - for YOLOv4: - - .. code-block:: sh - - python keras-YOLOv3-model-set/tools/model_converter/convert.py /yolov4.cfg /yolov4.weights - - - - for YOLOv4-tiny: - - .. 
code-block:: sh - - python keras-YOLOv3-model-set/tools/model_converter/convert.py /yolov4-tiny.cfg /yolov4-tiny.weights - - -4. Run model conversion from the TensorFlow 2 to an IR format: - - .. note:: - - Before you run the conversion, make sure you have installed all the model conversion API dependencies for TensorFlow 2. - - If you get errors, you may need to add the additional step to divide the input by 255: - - .. code-block:: sh - - --scale_values=image_input[255] - - - .. code-block:: sh - - mo --saved_model_dir yolov4 --output_dir models/IRs --input_shape [1,608,608,3] --model_name yolov4 - - -Converting YOLOv3 Model to the OpenVINO format -############################################## - -There are several public versions of TensorFlow YOLOv3 model implementation available on GitHub. This section explains how to convert YOLOv3 model from -the `repository `__ (commit ed60b90) to an IR , but the process is similar for other versions of TensorFlow YOLOv3 model. - -Overview of YOLOv3 Model Architecture -+++++++++++++++++++++++++++++++++++++ - -Originally, YOLOv3 model includes feature extractor called ``Darknet-53`` with three branches at the end that make detections at three different scales. These branches must end with the YOLO ``Region`` layer. - -``Region`` layer was first introduced in the DarkNet framework. Other frameworks, including TensorFlow, do not have the ``Region`` implemented as a single layer, so every author of public YOLOv3 model creates it using simple layers. This badly affects performance. For this reason, the main idea of YOLOv3 model conversion to IR is to cut off these custom ``Region`` -like parts of the model and complete the model with the ``Region`` layers where required. - -Dumping a YOLOv3 TensorFlow Model -+++++++++++++++++++++++++++++++++ - -To dump TensorFlow model out of `GitHub repository `__ (commit ed60b90), follow the instructions below: - -1. Clone the repository: - - .. 
code-block:: sh - - git clone https://github.com/mystic123/tensorflow-yolo-v3.git - cd tensorflow-yolo-v3 - - -2. (Optional) Checkout to the commit that the conversion was tested on: - - .. code-block:: sh - - git checkout ed60b90 - - -3. Download `coco.names `__ file from the DarkNet website **OR** use labels that fit your task. -4. Download the `yolov3.weights `__ (for the YOLOv3 model) or `yolov3-tiny.weights `__ (for the YOLOv3-tiny model) file **OR** use your pre-trained weights with the same structure. -5. Install PIL, which is used by the conversion script in the repo: - - .. code-block:: sh - - pip install pillow - - -6. Run a converter: - - .. note:: This converter works with TensorFlow 1.x and numpy 1.19 or lower. - - - - For YOLO-v3: - - .. code-block:: sh - - python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weights_file yolov3.weights - - - - For YOLOv3-tiny: - - .. code-block:: sh - - python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weights_file yolov3-tiny.weights --tiny - - - At this step, you may receive a warning like ``WARNING:tensorflow:Entity <...> could not be transformed and will be executed as-is.``. To work around this issue, switch to gast 0.2.2 with the following command: - - .. code-block:: sh - - pip3 install --user gast==0.2.2 - - -If you have YOLOv3 weights trained for an input image with the size different from 416 (320, 608 or your own), provide the ``--size`` key with the size of your image specified while running the converter. For example, run the following command for an image with size 608: - -.. 
code-block:: sh - - python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weights_file yolov3_608.weights --size 608 - - -Converting a YOLOv3 TensorFlow Model to the OpenVINO format -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -To solve the problems explained in the `YOLOv3 architecture overview <#overview-of-yolov3-model-architecture>`__ section, use the ``yolo_v3.json`` or ``yolo_v3_tiny.json`` (depending on a model) configuration file with custom operations located in the ``/tools/model_optimizer/extensions/front/tf`` repository. - -It consists of several attributes: - -.. code-block:: sh - - [ - { - "id": "TFYOLOV3", - "match_kind": "general", - "custom_attributes": { - "classes": 80, - "anchors": [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326], - "coords": 4, - "num": 9, - "masks":[[6, 7, 8], [3, 4, 5], [0, 1, 2]], - "entry_points": ["detector/yolo-v3/Reshape", "detector/yolo-v3/Reshape_4", "detector/yolo-v3/Reshape_8"] - } - } - ] - - -where: - -- ``id`` and ``match_kind`` are parameters that you cannot change. -- ``custom_attributes`` is a parameter that stores all the YOLOv3 specific attributes: - - - ``classes``, ``coords``, ``num``, and ``masks`` are attributes that you should copy from the configuration file that was used for model training. If you used DarkNet officially shared weights, you can use ``yolov3.cfg`` or ``yolov3-tiny.cfg`` configuration file from `GitHub repository `__. Replace the default values in ``custom_attributes`` with the parameters that follow the ``[yolo]`` titles in the configuration file. - - ``anchors`` is an optional parameter that is not used while inference of the model, but it used in a demo to parse ``Region`` layer output - - ``entry_points`` is a node name list to cut off the model and append the ``Region`` layer with custom attributes specified above. - - -To generate an IR of the YOLOv3 TensorFlow model, run: - -.. 
code-block:: sh - - mo \ - --input_model /path/to/yolo_v3.pb \ - --transformations_config front/tf/yolo_v3.json \ - --batch 1 \ - --output_dir - - -To generate an IR of the YOLOv3-tiny TensorFlow model, run: - -.. code-block:: sh - - mo \ - --input_model /path/to/yolo_v3_tiny.pb \ - --transformations_config front/tf/yolo_v3_tiny.json \ - --batch 1 \ - --output_dir - - -where: - -* ``batch`` defines shape of model input. In the example, ``batch`` is equal to 1, but you can also specify other integers larger than 1. -* ``transformations_config`` adds missing ``Region`` layers to the model. In the IR, the ``Region`` layer has name ``RegionYolo``. - -.. note:: - - The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the ``RGB<->BGR`` conversion specifying the command-line parameter: ``reverse_input_channels``. Otherwise, inference results may be incorrect. For more information about the parameter, refer to the **When to Reverse Input Channels** section of the :doc:`Converting a Model to Intermediate Representation (IR) <../../[legacy]-setting-input-shapes>` guide. - - -OpenVINO toolkit provides a demo that uses YOLOv3 model. Refer to the `Object Detection C++ Demo `__ for more information. - -Converting YOLOv1 and YOLOv2 Models to the IR -############################################# - -Before converting, choose a YOLOv1 or YOLOv2 model version that best suits your task. Download model configuration file and corresponding weight file: - -* From `DarkFlow repository `__ : configuration files are stored in the ``cfg`` directory, links to weight files are given in the ``README.md`` file. The files from this repository are adapted for conversion to TensorFlow using DarkFlow. -* From DarkNet website and repository: configuration files are stored in the ``cfg`` directory of the `repository `__, links to weight files are given on the `YOLOv1 `__ and `YOLOv2 `__ websites. 
- -To convert DarkNet YOLOv1 and YOLOv2 models to the OpenVINO format, follow these steps: - -1. `Install DarkFlow <#installing-darkflow>`__ -2. `Convert DarkNet YOLOv1 or YOLOv2 model to TensorFlow <#converting-a-darknet-yolov1-or-yolov2-model-to-tensorflow>`__ using DarkFlow -3. `Convert TensorFlow YOLOv1 or YOLOv2 model to IR <#converting-a-tensorflow-yolov1-or-yolov2-model-to-the-ir>`__ - - -Installing DarkFlow -+++++++++++++++++++++ - -You need DarkFlow to convert YOLOv1 and YOLOv2 models to TensorFlow. To install DarkFlow: - -1. Install DarkFlow `required dependencies `__. -2. Clone DarkFlow git repository: - - .. code-block:: sh - - git clone https://github.com/thtrieu/darkflow.git - - -3. Go to the root directory of the cloned repository: - - .. code-block:: sh - - cd darkflow - - -4. Install DarkFlow, using the instructions from the ``README.md`` file in the `DarkFlow repository `__. - - -Converting a DarkNet YOLOv1 or YOLOv2 Model to TensorFlow -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -To convert YOLOv1 or YOLOv2 model to TensorFlow, go to the root directory of the cloned DarkFlow repository, place the previously downloaded \*.cfg and \*.weights files in the current directory and run the following command: - -- For YOLOv1: - - .. code-block:: sh - - python3 flow --model yolov1.cfg --load yolov1.weights --savepb - - -- For YOLOv2 with VOC dataset ``--labels`` argument should be specified and additional changes in the original exporting script are required. In the `file `__ change line 121 from ``self.offset = 16`` to ``self.offset = 20``. Then run: - - .. code-block:: sh - - python3 flow --model yolov2-voc.cfg --load yolov2-voc.weights --labels voc-labels.txt --savepb - - -VOC labels can be found on the following `link `__ - -General conversion command is: - -.. code-block:: sh - - python3 flow --model /.cfg --load /.weights --labels --savepb - - -For YOLOv1, the ``--labels`` argument can be skipped. 
If the model was successfully converted, you can find the ``.meta`` and ``.pb`` files -in the ``built_graph`` subdirectory of the cloned DarkFlow repository. - -File ``.pb`` is a TensorFlow representation of the YOLO model. - -Converting a TensorFlow YOLOv1 or YOLOv2 Model to the IR -++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -The converted TensorFlow YOLO model is missing ``Region`` layer and its parameters. Original YOLO ``Region`` layer parameters are stored in the configuration ``/.cfg`` file under the ``[region]`` title. - -To recreate the original model structure, use the corresponding yolo ``.json`` configuration file with custom operations and ``Region`` layer parameters when converting the model to the IR. This file is located in the ``/tools/model_optimizer/extensions/front/tf`` directory. - -If the chosen model has specific values of these parameters, create another configuration file with custom operations and use it for conversion. - -To generate the IR of the YOLOv1 model, provide TensorFlow YOLOv1 or YOLOv2 model to model conversion API with the following parameters: - -.. code-block:: sh - - mo - --input_model /.pb \ - --batch 1 \ - --scale 255 \ - --transformations_config front/tf/.json - - -where: - -* ``batch`` defines shape of model input. In the example, ``batch`` is equal to 1, but you can also specify other integers larger than 1. -* ``scale`` specifies the scale factor that input values will be divided by. The model was trained with input values in the range ``[0,1]``. OpenVINO toolkit samples read input images as values in ``[0,255]`` range, so the scale 255 must be applied. -* ``transformations_config`` adds missing ``Region`` layers to the model. In the IR, the ``Region`` layer has name ``RegionYolo``. For other applicable parameters, refer to the :doc:`Convert Model from TensorFlow <../[legacy]-convert-tensorflow>` guide. - -.. 
note:: - - The color channel order (RGB or BGR) of the input data should match the channel order of the model training dataset. If they are different, perform the ``RGB<->BGR`` conversion specifying the command-line parameter: ``reverse_input_channels``. Otherwise, inference results may be incorrect. For more information about the parameter, refer to the **When to Reverse Input Channels** section of the :doc:`Converting a Model to Intermediate Representation (IR) <../../[legacy]-setting-input-shapes>` guide. - - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-onnx.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-onnx.rst deleted file mode 100644 index a864a037d488b7..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-onnx.rst +++ /dev/null @@ -1,70 +0,0 @@ -[LEGACY] Converting an ONNX Model -============================================= - -.. meta:: - :description: Learn how to convert a model from the - ONNX format to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting an ONNX Model <../../../../../openvino-workflow/model-preparation/convert-model-onnx>` article. - - -.. note:: ONNX models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. 
Refer to the :doc:`inference example <../../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. - -Converting an ONNX Model -######################## - -The model conversion process assumes you have an ONNX model that was directly downloaded from a public repository or converted from any framework that supports exporting to the ONNX format. - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - To convert an ONNX model, run ``convert_model()`` method with the path to the ``.onnx`` file: - - .. code-block:: py - :force: - - import openvino - from openvino.tools.mo import convert_model - - core = openvino.Core() - ov_model = convert_model(".onnx") - compiled_model = core.compile_model(ov_model, "AUTO") - - .. important:: - - The ``convert_model()`` method returns ``ov.Model`` that you can optimize, compile, or save to a file for subsequent use. - - .. tab-item:: CLI - :sync: cli - - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. - - .. code-block:: sh - - mo --input_model .onnx - - -There are no ONNX-specific parameters, so only framework-agnostic parameters are available to convert your model. For details, see the *General Conversion Parameters* section in the :doc:`Converting a Model to Intermediate Representation (IR) <../[legacy]-setting-input-shapes>` guide. - -Supported ONNX Layers -##################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations <../../../../../about-openvino/compatibility-and-support/supported-operations>` page. 
- -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials <[legacy]-conversion-tutorials>` page for a set of tutorials providing step-by-step instructions for converting specific ONNX models. Here are some examples: - -* :doc:`Convert ONNX Faster R-CNN Model <[legacy]-conversion-tutorials/convert-onnx-faster-r-cnn>` -* :doc:`Convert ONNX GPT-2 Model <[legacy]-conversion-tutorials/convert-onnx-gpt-2>` -* :doc:`Convert ONNX Mask R-CNN Model <[legacy]-conversion-tutorials/convert-onnx-mask-r-cnn>` - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-paddle.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-paddle.rst deleted file mode 100644 index 041a14f93547b6..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-paddle.rst +++ /dev/null @@ -1,139 +0,0 @@ -[LEGACY] Converting a PaddlePaddle Model -====================================================== - - -.. meta:: - :description: Learn how to convert a model from the - PaddlePaddle format to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a PaddlePaddle Model <../../../../../openvino-workflow/model-preparation/convert-model-paddle>` article. 
- - -This page provides general instructions on how to convert a model from a PaddlePaddle format to the OpenVINO IR format using Model Optimizer. The instructions are different depending on PaddlePaddle model format. - -.. note:: PaddlePaddle models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <../../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. - -Converting PaddlePaddle Model Inference Format -############################################## - -PaddlePaddle inference model includes ``.pdmodel`` (storing model structure) and ``.pdiparams`` (storing model weight). For how to export PaddlePaddle inference model, please refer to the `Exporting PaddlePaddle Inference Model `__ Chinese guide. - - -To convert a PaddlePaddle model, use the ``mo`` script and specify the path to the input ``.pdmodel`` model file: - -.. code-block:: sh - - mo --input_model .pdmodel - -**For example**, this command converts a yolo v3 PaddlePaddle network to OpenVINO IR network: - -.. 
code-block:: sh - - mo --input_model=yolov3.pdmodel --input=image,im_shape,scale_factor --input_shape=[1,3,608,608],[1,2],[1,2] --reverse_input_channels --output=save_infer_model/scale_0.tmp_1,save_infer_model/scale_1.tmp_1 - -Converting PaddlePaddle Model From Memory Using Python API -########################################################## - -Model conversion API supports passing the following PaddlePaddle models directly from memory: - -* ``paddle.hapi.model.Model`` -* ``paddle.fluid.dygraph.layers.Layer`` -* ``paddle.fluid.executor.Executor`` - -When you convert certain PaddlePaddle models, you may need to set the ``example_input`` or ``example_output`` parameters first. Below you will find examples that show how to convert aforementioned model formats using the parameters. - -* ``paddle.hapi.model.Model`` - - .. code-block:: py - :force: - - import paddle - from openvino.tools.mo import convert_model - - # create a paddle.hapi.model.Model format model - resnet50 = paddle.vision.models.resnet50() - x = paddle.static.InputSpec([1,3,224,224], 'float32', 'x') - y = paddle.static.InputSpec([1,1000], 'float32', 'y') - - model = paddle.Model(resnet50, x, y) - - # convert to OpenVINO IR format - ov_model = convert_model(model) - - # optional: serialize OpenVINO IR to *.xml & *.bin - from openvino.runtime import serialize - serialize(ov_model, "ov_model.xml", "ov_model.bin") - -* ``paddle.fluid.dygraph.layers.Layer`` - - ``example_input`` is required while ``example_output`` is optional, and accept the following formats: - - ``list`` with tensor(``paddle.Tensor``) or InputSpec(``paddle.static.input.InputSpec``) - - .. 
code-block:: py - :force: - - import paddle - from openvino.tools.mo import convert_model - - # create a paddle.fluid.dygraph.layers.Layer format model - model = paddle.vision.models.resnet50() - x = paddle.rand([1,3,224,224]) - - # convert to OpenVINO IR format - ov_model = convert_model(model, example_input=[x]) - -* ``paddle.fluid.executor.Executor`` - - ``example_input`` and ``example_output`` are required, and accept the following formats: - - ``list`` or ``tuple`` with variable(``paddle.static.data``) - - .. code-block:: py - :force: - - import paddle - from openvino.tools.mo import convert_model - - paddle.enable_static() - - # create a paddle.fluid.executor.Executor format model - x = paddle.static.data(name="x", shape=[1,3,224]) - y = paddle.static.data(name="y", shape=[1,3,224]) - relu = paddle.nn.ReLU() - sigmoid = paddle.nn.Sigmoid() - y = sigmoid(relu(x)) - - exe = paddle.static.Executor(paddle.CPUPlace()) - exe.run(paddle.static.default_startup_program()) - - # convert to OpenVINO IR format - ov_model = convert_model(exe, example_input=[x], example_output=[y]) - - -.. important:: - - The ``convert_model()`` method returns ``ov.Model`` that you can optimize, compile, or save to a file for subsequent use. - - -Supported PaddlePaddle Layers -############################# - -For the list of supported standard layers, refer to the :doc:`Supported Operations <../../../../../about-openvino/compatibility-and-support/supported-operations>` page. - -Frequently Asked Questions (FAQ) -################################ - -The model conversion API displays explanatory messages for typographical errors, incorrectly used options, or other issues. They describe the potential cause of the problem and give a link to the :doc:`Model Optimizer FAQ <../[legacy]-model-optimizer-faq>`, which provides instructions on how to resolve most issues. 
The FAQ also includes links to relevant sections in :doc:`Convert a Model <../../legacy-conversion-api>` to help you understand what went wrong. - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials <[legacy]-conversion-tutorials>` page for a set of tutorials providing step-by-step instructions for converting specific PaddlePaddle models. - - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-pytorch.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-pytorch.rst deleted file mode 100644 index 2ab66a49cd3546..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-pytorch.rst +++ /dev/null @@ -1,111 +0,0 @@ -[LEGACY] Converting a PyTorch Model -============================================ - - -.. meta:: - :description: Learn how to convert a model from the - PyTorch format to the OpenVINO Intermediate Representation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a PyTorch Model <../../../../../openvino-workflow/model-preparation/convert-model-pytorch>` article. - -This page provides instructions on how to convert a model from the PyTorch format to the OpenVINO IR format. - -The conversion is a required step to run inference using OpenVINO API. 
-It is not required if you choose to work with OpenVINO under the PyTorch framework, -using its :doc:`torch.compile feature <../../../../../openvino-workflow/torch-compile>`. - -Converting a PyTorch model with PyTorch Frontend -############################################################### - -To convert a PyTorch model to the OpenVINO IR format, use the OVC API (superseding the previously used tool, MO). To do so, use the ``convert_model()`` method, like so: - - -.. code-block:: py - :force: - - import torchvision - import torch - from openvino.tools.mo import convert_model - - model = torchvision.models.resnet50(weights='DEFAULT') - ov_model = convert_model(model) - -Following PyTorch model formats are supported: - -* ``torch.nn.Module`` -* ``torch.jit.ScriptModule`` -* ``torch.jit.ScriptFunction`` - -Converting certain PyTorch models may require model tracing, which needs the ``example_input`` -parameter to be set, for example: - -.. code-block:: py - :force: - - import torchvision - import torch - from openvino.tools.mo import convert_model - - model = torchvision.models.resnet50(weights='DEFAULT') - ov_model = convert_model(model, example_input=torch.randn(1, 3, 100, 100)) - -``example_input`` accepts the following formats: - -* ``openvino.runtime.Tensor`` -* ``torch.Tensor`` -* ``np.ndarray`` -* ``list`` or ``tuple`` with tensors (``openvino.runtime.Tensor`` / ``torch.Tensor`` / ``np.ndarray``) -* ``dictionary`` where key is the input name, value is the tensor (``openvino.runtime.Tensor`` / ``torch.Tensor`` / ``np.ndarray``) - -Sometimes ``convert_model`` will produce inputs of the model with dynamic rank or dynamic type. -Such model may not be supported by the hardware chosen for inference. To avoid this issue, -use the ``input`` argument of ``convert_model``. For more information, refer to :doc:`Convert Models Represented as Python Objects <../[legacy]-convert-models-as-python-objects>`. - -.. 
important:: - - The ``convert_model()`` method returns ``ov.Model`` that you can optimize, compile, or save to a file for subsequent use. - -Exporting a PyTorch Model to ONNX Format -######################################## - -It is also possible to export a PyTorch model to ONNX and then convert it to OpenVINO IR. To convert and deploy a PyTorch model this way, follow these steps: - -1. `Export a PyTorch model to ONNX <#exporting-a-pytorch-model-to-onnx-format>`__. -2. :doc:`Convert an ONNX model <[legacy]-convert-onnx>` to produce an optimized :doc:`Intermediate Representation <../../../../openvino-ir-format/operation-sets>` of the model based on the trained network topology, weights, and biases values. - -PyTorch models are defined in Python. To export them, use the ``torch.onnx.export()`` method. The code to -evaluate or test the model is usually provided with its code and can be used for its initialization and export. -The export to ONNX is crucial for this process, but it is covered by PyTorch framework, therefore, It will not be covered here in detail. -For more information, refer to the `Exporting PyTorch models to ONNX format `__ guide. - -To export a PyTorch model, you need to obtain the model as an instance of ``torch.nn.Module`` class and call the ``export`` function. - -.. code-block:: py - :force: - - import torch - - # Instantiate your model. This is just a regular PyTorch model that will be exported in the following steps. - model = SomeModel() - # Evaluate the model to switch some operations from training mode to inference. - model.eval() - # Create dummy input for the model. It will be used to run the model inside export function. 
- dummy_input = torch.randn(1, 3, 224, 224) - # Call the export function - torch.onnx.export(model, (dummy_input, ), 'model.onnx') - - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials <[legacy]-conversion-tutorials>` page for a set of tutorials providing step-by-step instructions for converting specific PyTorch models. Here are some examples: - -* :doc:`Convert PyTorch BERT-NER Model <[legacy]-conversion-tutorials/convert-pytorch-bert-ner>` -* :doc:`Convert PyTorch RCAN Model <[legacy]-conversion-tutorials/convert-pytorch-rcan>` -* :doc:`Convert PyTorch YOLACT Model <[legacy]-conversion-tutorials/convert-pytorch-yolact>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow-lite.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow-lite.rst deleted file mode 100644 index 6d9256cdf09994..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow-lite.rst +++ /dev/null @@ -1,37 +0,0 @@ -[LEGACY] Converting a TensorFlow Lite Model -===================================================== - - -.. meta:: - :description: Learn how to convert a model from a - TensorFlow Lite format to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Converting a TensorFlow Lite Model <../../../../../openvino-workflow/model-preparation/convert-model-tensorflow-lite>` article. - -To convert a TensorFlow Lite model, use the ``mo`` script and specify the path to the input ``.tflite`` model file: - -.. code-block:: sh - - mo --input_model .tflite - -TensorFlow Lite models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <../../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. - -.. important:: - - The ``convert_model()`` method returns ``ov.Model`` that you can optimize, compile, or save to a file for subsequent use. - -Supported TensorFlow Lite Layers -################################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations <../../../../../about-openvino/compatibility-and-support/supported-operations>` page. - -Supported TensorFlow Lite Models -################################### - -More than eighty percent of public TensorFlow Lite models are supported from open sources `TensorFlow Hub `__ and `MediaPipe `__. -Unsupported models usually have custom TensorFlow Lite operations. 
- diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow.rst deleted file mode 100644 index 2bcb6fde9b833b..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-convert-tensorflow.rst +++ /dev/null @@ -1,359 +0,0 @@ -[LEGACY] Converting a TensorFlow Model -============================================ - -.. meta:: - :description: Learn how to convert a model from a - TensorFlow format to the OpenVINO Intermediate Representation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a TensorFlow Model <../../../../../openvino-workflow/model-preparation/convert-model-tensorflow>` article. - - -.. note:: TensorFlow models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <../../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. - -The conversion instructions are different depending on whether your model was created with TensorFlow v1.X or TensorFlow v2.X. 
- -Converting TensorFlow 1 Models -############################### - -Converting Frozen Model Format -+++++++++++++++++++++++++++++++ - -To convert a TensorFlow model, use the ``*mo*`` script to simply convert a model with a path to the input model *.pb* file: - -.. code-block:: sh - - mo --input_model .pb - - -Converting Non-Frozen Model Formats -+++++++++++++++++++++++++++++++++++ - -There are three ways to store non-frozen TensorFlow models and convert them by model conversion API: - -1. **Checkpoint**. In this case, a model consists of two files: ``inference_graph.pb`` (or ``inference_graph.pbtxt``) and ``checkpoint_file.ckpt``. -If you do not have an inference graph file, refer to the `Freezing Custom Models in Python <#freezing-custom-models-in-python>`__ section. -To convert the model with the inference graph in ``.pb`` format, run the `mo` script with a path to the checkpoint file: - -.. code-block:: sh - - mo --input_model .pb --input_checkpoint - -To convert the model with the inference graph in ``.pbtxt`` format, run the ``mo`` script with a path to the checkpoint file: - -.. code-block:: sh - - mo --input_model .pbtxt --input_checkpoint --input_model_is_text - - -2. **MetaGraph**. In this case, a model consists of three or four files stored in the same directory: ``model_name.meta``, ``model_name.index``, -``model_name.data-00000-of-00001`` (the numbers may vary), and ``checkpoint`` (optional). -To convert such TensorFlow model, run the `mo` script with a path to the MetaGraph ``.meta`` file: - -.. code-block:: sh - - mo --input_meta_graph .meta - - -3. **SavedModel format**. In this case, a model consists of a special directory with a ``.pb`` file -and several subfolders: ``variables``, ``assets``, and ``assets.extra``. For more information about the SavedModel directory, refer to the `README `__ file in the TensorFlow repository. -To convert such TensorFlow model, run the ``mo`` script with a path to the SavedModel directory: - -.. 
code-block:: sh - - mo --saved_model_dir - - -You can convert TensorFlow 1.x SavedModel format in the environment that has a 1.x or 2.x version of TensorFlow. However, TensorFlow 2.x SavedModel format strictly requires the 2.x version of TensorFlow. -If a model contains operations currently unsupported by OpenVINO, prune these operations by explicit specification of input nodes using the ``--input`` option. -To determine custom input nodes, display a graph of the model in TensorBoard. To generate TensorBoard logs of the graph, use the ``--tensorboard_logs`` option. -TensorFlow 2.x SavedModel format has a specific graph due to eager execution. In case of pruning, find custom input nodes in the ``StatefulPartitionedCall/*`` subgraph of TensorFlow 2.x SavedModel format. - -Freezing Custom Models in Python -++++++++++++++++++++++++++++++++ - -When a network is defined in Python code, you have to create an inference graph file. Graphs are usually built in a form -that allows model training. That means all trainable parameters are represented as variables in the graph. -To be able to use such graph with model conversion API, it should be frozen and dumped to a file with the following code: - -.. code-block:: py - :force: - - import tensorflow as tf - from tensorflow.python.framework import graph_io - frozen = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["name_of_the_output_node"]) - graph_io.write_graph(frozen, './', 'inference_graph.pb', as_text=False) - -Where: - -* ``sess`` is the instance of the TensorFlow Session object where the network topology is defined. -* ``["name_of_the_output_node"]`` is the list of output node names in the graph; ``frozen`` graph will include only those nodes from the original ``sess.graph_def`` that are directly or indirectly used to compute given output nodes. The ``'name_of_the_output_node'`` is an example of a possible output node name. You should derive the names based on your own graph. 
-* ``./`` is the directory where the inference graph file should be generated. -* ``inference_graph.pb`` is the name of the generated inference graph file. -* ``as_text`` specifies whether the generated file should be in human readable text format or binary. - -Converting TensorFlow 2 Models -############################### - -To convert TensorFlow 2 models, ensure that `openvino-dev[tensorflow2]` is installed via `pip`. -TensorFlow 2.X officially supports two model formats: SavedModel and Keras H5 (or HDF5). -Below are the instructions on how to convert each of them. - -SavedModel Format -+++++++++++++++++ - -A model in the SavedModel format consists of a directory with a ``saved_model.pb`` file and two subfolders: ``variables`` and ``assets``. -To convert such a model, run the `mo` script with a path to the SavedModel directory: - -.. code-block:: sh - - mo --saved_model_dir - -TensorFlow 2 SavedModel format strictly requires the 2.x version of TensorFlow installed in the -environment for conversion to the Intermediate Representation (IR). - -If a model contains operations currently unsupported by OpenVINO™, -prune these operations by explicit specification of input nodes using the ``--input`` or ``--output`` -options. To determine custom input nodes, visualize a model graph in the TensorBoard. - -TensorFlow 2 SavedModel format has a specific graph structure due to eager execution. In case of -pruning, find custom input nodes in the ``StatefulPartitionedCall/*`` subgraph. - -Since the 2023.0 release, direct pruning of models in SavedModel format is not supported. -It is essential to freeze the model before pruning. Use the following code snippet for model freezing: - -.. 
code-block:: py - :force: - - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - saved_model_dir = "./saved_model" - imported = tf.saved_model.load(saved_model_dir) - # retrieve the concrete function and freeze - concrete_func = imported.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY] - frozen_func = convert_variables_to_constants_v2(concrete_func, - lower_control_flow=False, - aggressive_inlining=True) - # retrieve GraphDef and save it into .pb format - graph_def = frozen_func.graph.as_graph_def(add_shapes=True) - tf.io.write_graph(graph_def, '.', 'model.pb', as_text=False) - -Keras H5 -++++++++ - -If you have a model in HDF5 format, load the model using TensorFlow 2 and serialize it to -SavedModel format. Here is an example of how to do it: - -.. code-block:: py - :force: - - import tensorflow as tf - model = tf.keras.models.load_model('model.h5') - tf.saved_model.save(model,'model') - - -The Keras H5 model with a custom layer has specifics to be converted into SavedModel format. -For example, the model with a custom layer ``CustomLayer`` from ``custom_layer.py`` is converted as follows: - -.. code-block:: py - :force: - - import tensorflow as tf - from custom_layer import CustomLayer - model = tf.keras.models.load_model('model.h5', custom_objects={'CustomLayer': CustomLayer}) - tf.saved_model.save(model,'model') - - -Then follow the above instructions for the SavedModel format. - -.. note:: - - Do not use other hacks to resave TensorFlow 2 models into TensorFlow 1 formats. - -Command-Line Interface (CLI) Examples Using TensorFlow-Specific Parameters -########################################################################## - -* Launching model conversion for Inception V1 frozen model when model file is a plain text protobuf: - - .. 
code-block:: sh - - mo --input_model inception_v1.pbtxt --input_model_is_text -b 1 - - -* Launching model conversion for Inception V1 frozen model and dump information about the graph to TensorBoard log dir ``/tmp/log_dir`` - - .. code-block:: sh - - mo --input_model inception_v1.pb -b 1 --tensorboard_logdir /tmp/log_dir - - -* Launching model conversion for BERT model in the SavedModel format, with three inputs. Specify explicitly the input shapes where the batch size and the sequence length equal 2 and 30 respectively. - - .. code-block:: sh - - mo --saved_model_dir BERT --input mask,word_ids,type_ids --input_shape [2,30],[2,30],[2,30] - -Conversion of TensorFlow models from memory using Python API -############################################################ - -Model conversion API supports passing TensorFlow/TensorFlow2 models directly from memory. - -* ``tf.keras.Model`` - - .. code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - model = tf.keras.applications.ResNet50(weights="imagenet") - ov_model = convert_model(model) - - -* ``tf.keras.layers.Layer``. Requires setting the "input_shape". - - .. code-block:: py - :force: - - import tensorflow_hub as hub - from openvino.tools.mo import convert_model - - model = hub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/classification/5") - ov_model = convert_model(model, input_shape=[-1, 224, 224, 3]) - -* ``tf.Module``. Requires setting the "input_shape". - - .. code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - class MyModule(tf.Module): - def __init__(self, name=None): - super().__init__(name=name) - self.variable1 = tf.Variable(5.0, name="var1") - self.variable2 = tf.Variable(1.0, name="var2") - def __call__(self, x): - return self.variable1 * x + self.variable2 - - model = MyModule(name="simple_module") - ov_model = convert_model(model, input_shape=[-1]) - -* ``tf.compat.v1.Graph`` - - .. 
code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [100], 'Input1') - inp2 = tf.compat.v1.placeholder(tf.float32, [100], 'Input2') - output = tf.nn.relu(inp1 + inp2, name='Relu') - tf.compat.v1.global_variables_initializer() - model = sess.graph - - ov_model = convert_model(model) - -* ``tf.compat.v1.GraphDef`` - - .. code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [100], 'Input1') - inp2 = tf.compat.v1.placeholder(tf.float32, [100], 'Input2') - output = tf.nn.relu(inp1 + inp2, name='Relu') - tf.compat.v1.global_variables_initializer() - model = sess.graph_def - - ov_model = convert_model(model) - -* ``tf.function`` - - .. code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - @tf.function( - input_signature=[tf.TensorSpec(shape=[1, 2, 3], dtype=tf.float32), - tf.TensorSpec(shape=[1, 2, 3], dtype=tf.float32)]) - def func(x, y): - return tf.nn.sigmoid(tf.nn.relu(x + y)) - - ov_model = convert_model(func) - -* ``tf.compat.v1.session`` - - .. code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [100], 'Input1') - inp2 = tf.compat.v1.placeholder(tf.float32, [100], 'Input2') - output = tf.nn.relu(inp1 + inp2, name='Relu') - tf.compat.v1.global_variables_initializer() - - ov_model = convert_model(sess) - -* ``tf.train.checkpoint`` - - .. code-block:: py - :force: - - import tensorflow as tf - from openvino.tools.mo import convert_model - - model = tf.keras.Model(...) - checkpoint = tf.train.Checkpoint(model) - save_path = checkpoint.save(save_directory) - # ... 
- checkpoint.restore(save_path) - ov_model = convert_model(checkpoint) - -.. important:: - - The ``convert_model()`` method returns ``ov.Model`` that you can optimize, compile, or save to a file for subsequent use. - -Supported TensorFlow and TensorFlow 2 Keras Layers -################################################## - -For the list of supported standard layers, refer to the :doc:`Supported Operations <../../../../../about-openvino/compatibility-and-support/supported-operations>` page. - -Frequently Asked Questions (FAQ) -################################ - -The model conversion API provides explanatory messages if it is unable to run to completion due to typographical errors, incorrectly used options, or other issues. The message describes the potential cause of the problem and gives a link to the :doc:`Model Optimizer FAQ <../[legacy]-model-optimizer-faq>`. The FAQ provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in :doc:`Convert a Model <../../legacy-conversion-api>` to help you understand what went wrong. - -Summary -####### - -In this document, you learned: - -* Basic information about how the model conversion API works with TensorFlow models. -* Which TensorFlow models are supported. -* How to freeze a TensorFlow model. -* How to convert a trained TensorFlow model using model conversion API with both framework-agnostic and TensorFlow-specific command-line parameters. - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials <[legacy]-conversion-tutorials>` page for a set of tutorials providing step-by-step instructions for converting specific TensorFlow models. 
Here are some examples: - -* :doc:`Convert TensorFlow EfficientDet Models <[legacy]-conversion-tutorials/convert-tensorflow-efficient-det>` -* :doc:`Convert TensorFlow FaceNet Models <[legacy]-conversion-tutorials/convert-tensorflow-face-net>` -* :doc:`Convert TensorFlow Object Detection API Models <[legacy]-conversion-tutorials/convert-tensorflow-object-detection>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-troubleshooting-reshape-errors.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-troubleshooting-reshape-errors.rst deleted file mode 100644 index 4d5c282a947d1b..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-troubleshooting-reshape-errors.rst +++ /dev/null @@ -1,54 +0,0 @@ -[LEGACY] Troubleshooting Reshape Errors -======================================= - - -.. meta:: - :description: In OpenVINO™, you can use several methods to address the issues - of non-reshape-able models and shape collision, which prevent - normal shape propagation. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - -How To Avoid Shape Collision -############################ - -Operation semantics may impose restrictions on input shapes of the operation. -Shape collision during shape propagation may be a sign that new shape does not satisfy the restrictions. -Changing the model input shape may result in intermediate operations shape collision. 
For example, in the following: - -* The :doc:`Reshape <../../../openvino-ir-format/operation-sets/operation-specs/shape/reshape-1>` operation with a hard-coded output shape value, -* The :doc:`MatMul <../../../openvino-ir-format/operation-sets/operation-specs/matrix/matmul-1>` operation with the ``Const`` second input and this input cannot be resized by spatial dimensions due to operation semantics. - -Model structure and logic should not change significantly after model reshaping. - -* The Global Pooling operation is commonly used to reduce output feature map of classification models output. Having the input of the shape *[N, C, H, W]*, Global Pooling returns the output of the shape *[N, C, 1, 1]*. Model architects usually express Global Pooling with the help of the ``Pooling`` operation with the fixed kernel size *[H, W]*. During spatial reshape, having the input of the shape *[N, C, H1, W1]*, ``Pooling`` with the fixed kernel size *[H, W]* returns the output of the shape *[N, C, H2, W2]*, where *H2* and *W2* are commonly not equal to *1*. It breaks the classification model structure. For example, the public `Inception family models from TensorFlow `__ have this issue. - -* Changing the model input shape may significantly affect its accuracy. For example, Object Detection models from TensorFlow have resizing restrictions by design. To keep the model valid after the reshape, choose a new input shape that satisfies conditions listed in the ``pipeline.config`` file. - -.. _how-to-fix-non-reshape-able-model: - -How To Fix Non-Reshape-able Model -################################# - -To fix some operators which prevent normal shape propagation: - -* see if the issue can be fixed via changing the values of some operators' input. For example, the most common problem of non-reshape-able models is a ``Reshape`` operator with a hard-coded output shape. You can cut-off the hard-coded second input of ``Reshape`` and fill it in with relaxed values. 
For the following example in the diagram below, the model conversion API command line should read: - - .. code-block:: sh - - mo --input_model path/to/model --input data[8,3,224,224],1:reshaped[2]->[0,-1] - - - With ``1:reshaped[2]``, it is required to cut the second input (counting from zero, so ``1:`` means the second input) of the operation named ``reshaped`` and replace it with a ``Parameter`` with shape ``[2]``. - With ``->[0,-1]``, this new ``Parameter`` is replaced by a ``Constant`` operator which has the ``[0, -1]`` value. - Since the ``Reshape`` operator has ``0`` and ``-1`` as specific values, it allows propagating shapes freely without losing the intended meaning of ``Reshape``. For more information, see :doc:`the specification <../../../openvino-ir-format/operation-sets/operation-specs/shape/reshape-1>`. - - .. image:: ../../../../assets/images/batch_relaxation.png - -* transform the model during conversion, on the back phase. For more information, see the :doc:`How to Convert a Model <../legacy-model-optimizer-extensibility>`, -* transform the OpenVINO model at runtime. For more information, see :doc:`OpenVINO Runtime Transformations <../../../openvino-extensibility/transformation-api>`, -* modify the original model with the help of the original framework. - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility.rst deleted file mode 100644 index 3d2365f45ffe3b..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility.rst +++ /dev/null @@ -1,326 +0,0 @@ -Legacy Model Optimizer Extensibility -==================================== - - - -..
toctree:: - :maxdepth: 1 - :hidden: - - legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification - legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions - legacy-model-optimizer-extensibility/[legacy]-extending-model-optimizer-with-caffe-python-layers - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../openvino-extensibility/frontend-extensions>` article. - -This article describes Model Optimizer internals. Altering them may result in application instability, and in case of future changes to the API, lack of backward compatibility. - -.. note:: - If you want to add support for ONNX, TensorFlow Lite, PaddlePaddle or TensorFlow operations, or you are not familiar with other extension alternatives in OpenVINO, read :doc:`this guide <../../openvino-extensibility>` instead. - -.. _model-optimizer-extensibility: - -Model Optimizer extensibility mechanism enables support of new operations and custom transformations to generate the optimized intermediate representation (IR) as described :doc:`here <../../openvino-ir-format/operation-sets>`. -This mechanism is a core part of Model Optimizer, as a huge set of examples showing how to add custom logic to support your model. - -There are several cases when the customization is needed: - -* A model contains operation(s) not known for the Model Optimizer, but these operation(s) could be expressed as a combination of supported operations. In this case, a custom transformation should be implemented to replace unsupported operation(s) with supported ones. 
-* A model contains a sub-graph of operations that can be replaced with a smaller number of operations to get better performance. This example corresponds to so-called *fusing transformations* (e.g., replacing a sub-graph performing the calculation :math:`x/(1.0+e^{-(beta*x)})` with a single operation of type :doc:`Swish <../../openvino-ir-format/operation-sets/operation-specs/activation/swish-4>`). -* A model contains a custom framework operation (the operation that is not a part of an official operation set of the framework) that was developed using the framework extensibility mechanism. In this case, Model Optimizer should know how to handle the operation and generate a corresponding section in an IR for it. - -It is necessary to figure out how Model Optimizer represents a model in memory and converts it to an IR before -going into details of the Model Optimizer extensibility mechanism. - -.. note:: - All paths in this article are provided relative to the Model Optimizer installation directory if not stated otherwise. - -.. _mo_model_representation_in_memory: - -============================== -Model Representation in Memory -============================== - -The model can be represented as a directed graph, where nodes are operations and edges correspond to data passing from a -producer operation (node) to a consumer operation (node). - -Model Optimizer uses Python class ``mo.graph.graph.Graph`` instance to represent the computation graph in memory during -the model conversion. This class is inherited from the ``networkx.MultiDiGraph`` class of the standard ``networkx`` Python -library. It provides many convenient methods to traverse and modify the graph. Refer to the ``mo/graph/graph.py`` file for examples. - -Model Optimizer keeps all necessary information about the operation in node attributes.
Model Optimizer uses the ``mo.graph.graph.Node`` class defined in the ``mo/graph/graph.py`` file, which is a wrapper on top of a ``networkx`` node attributes -dictionary, and provides many convenient methods to work with the node. For example, the node ``my_node`` attribute with a -name ``my_attr`` can be retrieved from the node with the following code ``my_node.my_attr``, which is equivalent to obtaining -attribute with name ``my_attr`` in the ``graph.node[my_node]`` dictionary. For the class implementation details, refer to the ``mo/graph/graph.py`` file. - -An operation may have several inputs and outputs. For example, operation :doc:`Split <../../openvino-ir-format/operation-sets/operation-specs/movement/split-1>` has -two inputs: data to split and axis to split along, and variable number of outputs depending on a value of attribute -``num_splits``. Each input data to the operation is passed to a specific operation **input port**. An operation produces -the output data from an **output port**. Input and output ports are numbered from 0 independently. Model Optimizer uses -classes ``mo.graph.port.Port`` and ``mo.graph.connection.Connection``, which are useful abstraction to perform graph -modifications like nodes connecting/re-connecting and graph traversing. These classes are widely used in the Model -Optimizer code so it is easy to find a lot of usage examples. - -There is no dedicated class corresponding to an edge, so low-level graph manipulation is needed to get access to -edge attributes if needed. Meanwhile, most manipulations with nodes connections should be done with help of the -``mo.graph.connection.Connection`` and ``mo.graph.port.Port`` classes. Thus, low-level graph manipulation is error prone and -is strongly not recommended. - -Further details and examples related to a model representation in memory are provided in the sections below, in a context -for a better explanation. 
For more information on how to use ports and connections, refer to the :doc:`Graph Traversal and Modification Using Ports and Connections ` article. - -.. _mo_model_conversion_pipeline: - -========================= -Model Conversion Pipeline -========================= - -A model conversion pipeline can be represented with the following diagram: - -.. image:: ../../../assets/images/MO_conversion_pipeline.svg - -Each conversion step is reviewed in details below. - -Model Loading -############# - -Model Optimizer gets a trained model file as an input. The model loader component of Model Optimizer reads a model file -using Python bindings provided with the framework and builds an in-memory representation of a computation graph. There -is a separate loader for each supported framework. These loaders are implemented in the -``extensions/load//loader.py`` files of Model Optimizer. - -.. note:: - Model Optimizer uses a special parser for Caffe models built on top of the ``caffe.proto`` file. In the case of a model loading failure, Model Optimizer throws an error and requests preparation of the parser that can read the model. For more information on how to prepare the custom Caffe parser, refer to the :ref:`question #1 ` in the :doc:`Model Optimizer FAQ `. - -The result of a model loading step is a ``Graph`` object, which can be depicted like in the following example: - -.. image:: ../../../assets/images/MO_graph_after_loader.svg - -Model Optimizer loader saves an operation instance framework description (usually it is a Protobuf message) into a node -attribute usually with a name ``pb`` for each operation of an input model. It is important that this is a -**framework-specific** description of an operation. This means that an operation (e.g. 
-:doc:`Convolution <../../openvino-ir-format/operation-sets/operation-specs/convolution/convolution-1>` may be represented differently in, for example, Caffe and -TensorFlow frameworks but performs the same calculations from a mathematical point of view. - -In the image above, the **Operation 2** has one input and two outputs. The tensor produced from the output **port 0** is -consumed with the **Operation 5** (the input **port 0**) and **Operation 3** (the input **port 1**). The tensor produced from the -output **port 1** is consumed with the **Operation 4** (the input **port 0**). - -Each edge has two attributes: ``in`` and ``out``. They contain the input port number of the consumer node and the output port -number of the producer node. These attributes describe the fact that nodes are operations consuming some input tensors -and producing some output tensors. From the perspective of Model Optimizer, nodes themselves are **black boxes** because -they do not contain required information about the operation they perform. - -Operations Attributes Extracting -################################ - -The next step is to parse framework-dependent operation representation saved in a node attribute and update the node -attributes with the operation specific attributes. There are three options to do this. - -1. The extractor extension approach (recommended way to extract attributes for an operation). Explained in details in the :doc:`Operation Extractor ` article. -2. The legacy approach with a built-in extractor. The ``mo/front//extractor.py`` file (for example, the one for Caffe) defines a dictionary with extractors for specific operation types. A key in the dictionary is a type of an operation to trigger the extracting function for and the value is the function. The function has one parameter – a node to extract attributes from. This is a legacy and non-extensible approach so it should be avoided. This mechanism will be removed in future versions of Model Optimizer. 
- -The extractors execution order is the following: - -* ``CustomLayersMapping.xml`` (for Caffe models only). -* Model Optimizer extension. -* Built-in Model Optimizer extractor. - -The result of operations attributes extracting step can be depicted like in the following example: - -.. image:: ../../../assets/images/MO_graph_after_extractors.svg - -The only difference in the graph from the previous step is that nodes contain dictionary with extracted attributes and -operation-specific attributes needed for Model Optimizer. However, from this step, Model Optimizer does not -need the original representation of the operation/model and just uses Model Optimizer representation (there are some -peculiar cases in which Model Optimizer still uses the ``pb`` attribute, covered in this -article partially). A detailed list of common node attributes and their values is provided in the -:doc:`Model Optimizer Operation ` article. - -Front Phase -########### - -For legacy reasons, you must specify shapes for all not fully-defined inputs of the model. In contrast, other -machine learning frameworks, like TensorFlow, let you create a model with undefined or partially defined input shapes. -As an example, undefined dimension is marked with an integer value ``-1`` in a TensorFlow model or has some string name -in an ONNX model. - -During the front phase, Model Optimizer knows shape of the model inputs and constants only and does not know shapes -(and even ranks) of the intermediate tensors. But information about shapes may not be needed to implement particular -transformation. For example, the transformation ``extensions/front/TopKNormalize.py`` removes an attribute ``k`` from a -``TopK`` node and adds an input constant with the value ``k``. The transformation is needed to convert a ``TopK`` operation. 
-The transformation converts the framework semantics, where the number of output elements is defined as an attribute of the operation, to the -OpenVINO :doc:`TopK <../../openvino-ir-format/operation-sets/operation-specs/sort/top-k-3>` operation semantics, which require this value to be a separate input. - -It is important to mention that sometimes it seems like transformation cannot be implemented during the front phase -because the actual values of inputs or shapes are needed. In fact, manipulations of shapes or values can be implemented -using operations that are added to the graph. Consider the -``extensions/front/onnx/flattenONNX_to_reshape.py`` transformation, which replaces an ONNX -`Flatten `__ operation with a sub-graph of operations performing -the following (when ``axis`` is not equal to 0 and 1): - -1. Calculate a shape of the ``Flatten`` input tensor, using the :doc:`ShapeOf <../../openvino-ir-format/operation-sets/operation-specs/shape/shape-of-3>` operation. -2. Get the first ``axis`` elements from the output of ``Shape`` operation and calculate their product, using the :doc:`ReduceProd <../../openvino-ir-format/operation-sets/operation-specs/reduction/reduce-prod-1>` operation. -3. Concatenate output of the ``ReduceProd`` and constant with the value of ``-1`` (for an explanation of this value refer to the :doc:`Reshape <../../openvino-ir-format/operation-sets/operation-specs/shape/reshape-1>` specification page). -4. Use the concatenated value as the second input to the ``Reshape`` operation. - -It is highly recommended to write shape-agnostic transformations to avoid model reshape-ability issues. For more information related to the reshaping of a model, refer to the :doc:`Using Shape Inference <../../../openvino-workflow/running-inference/changing-input-shape>` guide. - -More information on how to develop front phase transformations and dedicated API description is provided in the -:ref:`Front Phase Transformations `. - -..
_mo_partial_inference: - -Partial Inference -################# - -Model Optimizer performs a partial inference of a model during model conversion. This procedure includes output shapes -calculation of all operations in a model and constant folding (value calculation for constant sub-graphs). The constant -folding is needed for the shape inference because in some cases evaluation of constant sub-graph is needed to calculate -output shapes. For example, the output shape for the :doc:`Reshape <../../openvino-ir-format/operation-sets/operation-specs/shape/reshape-1>` operation may be -defined as a mathematical expression using the :doc:`ShapeOf <../../openvino-ir-format/operation-sets/operation-specs/shape/shape-of-3>` operation output. - -.. note:: - Model Optimizer does not fold sub-graphs starting from the :doc:`ShapeOf <../../openvino-ir-format/operation-sets/operation-specs/shape/shape-of-3>` operation by default because this leads to a model non-reshape-ability (the command-line parameter ``--static_shape`` can override this behavior). For more information related to reshaping of a model, refer to the :doc:`Using Shape Inference <../../../openvino-workflow/running-inference/changing-input-shape>` guide. - -Model Optimizer calculates output shapes for all operations in a model to write them to Intermediate Representation files. - -.. note:: - This is a legacy requirement. Starting with IR version 10, OpenVINO Runtime needs to know shapes of the :doc:`Const <../../openvino-ir-format/operation-sets/operation-specs/infrastructure/constant-1>` and the :doc:`Parameter <../../openvino-ir-format/operation-sets/operation-specs/infrastructure/parameter-1>` operations only. 
The OpenVINO Runtime calculates output shapes for all operations in a model, using shapes of :doc:`Parameter <../../openvino-ir-format/operation-sets/operation-specs/infrastructure/parameter-1>` and :doc:`Const <../../openvino-ir-format/operation-sets/operation-specs/infrastructure/constant-1>` operations defined with respective operation attributes. - -Model Optimizer inserts **data** nodes to the computation graph before starting the partial inference phase. The data node -corresponds to the specific tensor produced with the operation. Each data node contains two attributes: ``shape``, -containing the shape of the tensor, and ``value``, which may contain the actual value of the tensor. The value for a ``value`` -attribute is equal to ``None`` if this tensor value cannot be calculated. This happens in two cases: when a tensor value -depends on the values passed to the :doc:`Parameter <../../openvino-ir-format/operation-sets/operation-specs/infrastructure/parameter-1>` operation of a model, or when -Model Optimizer does not have a value propagation implementation for the operation. - -Before running partial inference, the graph can be depicted like in the following example: - -.. image:: ../../../assets/images/MO_graph_before_partial_inference.svg - -The difference in a graph structure with a graph during the front phase is not only in the data nodes, but also in the -edge attributes. Note that an ``out`` attribute is specified for edges **from operation** nodes only, while an ``in`` -attribute is specified for edges **from data** nodes only. This corresponds to the fact that a tensor (data node) is -produced from a specific output port of an operation and is consumed with a specific input port of an operation. Also, -a unique data node is created for each output port of an operation. The node may be used as an input node for several -operation nodes.
This is similar to the data node **data2_0**, which is consumed with the input **port 1** of the **Operation 3** and -input **port 0** of the **Operation 5**. - -Now, consider how Model Optimizer performs shape and value propagation. Model Optimizer performs graph nodes -topological sort. An error message is thrown if a graph contains a cycle. Then, shape inference functions are called for -each node in the graph, according to the topological order. Each node of the graph must have an attribute called ``infer`` -with a shape inference function, which is a function with one parameter – an instance of the ``Node`` class. The ``infer`` -attribute is usually set in the operation extractor or when a node is added in some transformation using the Model -Optimizer operation class inherited from the ``mo.ops.Op`` class. For more information on how to specify a shape inference function, -refer to the :doc:`Model Optimizer Operation ` and :doc:`Operation Extractor ` articles. - -A shape inference function should calculate an operation (node) output shape(s) based on input shape(s) and operation -(node) attribute(s) and update ``shape`` and optionally ``value`` attributes of the corresponding data node(s). A simplified -example of the shape infer function for the :doc:`Reshape <../../openvino-ir-format/operation-sets/operation-specs/shape/reshape-1>` operation (the full version is -available in the ``mo/ops/reshape.py`` file): - -.. code-block:: py - :force: - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - input_shape = node.in_port(0).data.get_shape() # get the input tensor shape - new_shape = node.in_port(1).data.get_value() # get the value defining the output tensor shape. This tensor may - # have special values like 0 and -1 - - output_shape = ...
# calculate output shape without special values like 0 and -1 - - if node.in_port(0).data.get_value() is not None: # if the input value is defined then calculate output value; - # shape will be updated automatically with the value shape - node.out_port(0).data.set_value(node.in_port(0).data.get_value().reshape(output_shape)) - else: # in the opposite case calculate the output shape only - node.out_port(0).data.set_shape(output_shape) - -Methods ``in_port()`` and ``out_port()`` of the ``Node`` class are used to get and set data node attributes. For more information on -how to use them, refer to the :doc:`Graph Traversal and Modification Using Ports and Connections ` article. - -.. note:: - A shape inference function should perform output shape calculation in the original model layout. For example, OpenVINO™ supports Convolution operations in NCHW layout only but TensorFlow supports NHWC layout as well. Model Optimizer shape inference function calculates output shapes for NHWC Convolutions in NHWC layout and only during the layout change phase the shape is converted to NCHW. - -.. note:: - There is a legacy approach to read data node attributes, like ``input_shape = op_node.in_node(0).shape`` and modify data nodes attributes, like ``op_node.out_node(0).shape = some_value``. This approach is still used in the Model Optimizer code but is not recommended. Instead, use the approach described in the :ref:`Ports `. - -Middle Phase -############ - -The middle phase starts after partial inference. At this phase, a graph contains data nodes and output shapes of all -operations in the graph have been calculated. Any transformation implemented at this stage must update the ``shape`` -attribute for all newly added operations.
It is highly recommended to use API described in the -:doc:`Graph Traversal and Modification Using Ports and Connections ` because modification of a graph using this API causes automatic re-inference of affected nodes as well as necessary data nodes creation. - -More information on how to develop middle transformations and dedicated API description is provided in the -:ref:`Middle Phase Transformations `. - -NHWC to NCHW Layout Change -########################## - -There are several middle transformations responsible for changing model layout from NHWC to NCHW. These transformations are triggered by default for TensorFlow models as TensorFlow supports Convolution operations in the NHWC layout. - -This layout change is disabled automatically if the model does not have operations that OpenVINO™ needs to execute in the NCHW layout, for example, Convolutions in NHWC layout. - -For more details on how it works, refer to the source code of the transformations mentioned in the below summary of the process: - -1. Model Optimizer changes output shapes of most of operations producing 4D and 5D (four dimensional and five dimensional) tensors as if they were in NHWC layout to NCHW layout: ``nchw_shape = np.array(nhwc_shape)[[0, 3, 1, 2]]`` for 4D and ``nchw_shape = np.array(nhwc_shape)[[0, 4, 1, 2, 3]]`` for 5D. This permutation does not happen for some operations with specific conditions identified during a model conversion. -2. Model Optimizer inserts :doc:`Gather <../../openvino-ir-format/operation-sets/operation-specs/movement/gather-1>` operations into the sub-graph related to shape calculation in order to perform shape calculation in a correct layout. -3. Model Optimizer inserts :doc:`Transpose <../../openvino-ir-format/operation-sets/operation-specs/movement/transpose-1>` operations for some operations with specific conditions, identified during a model conversion, to produce correct inference results.
- -The main transformations responsible for a layout change are: - -* ``extensions/middle/ApplyPermutations.py`` -* ``extensions/middle/InsertLayoutPropagationTransposes.py`` -* ``extensions/middle/MarkSubgraphsWithCorrectLayout.py`` -* ``extensions/middle/ApplyNHWCtoNCHWpermutation.py`` -* ``extensions/middle/LayoutChangeForConstantShapePaths.py`` - -Back Phase -########## - -The back phase starts after the layout change to NCHW. This phase contains mostly the following transformations: - -1. Transformations that should work with a graph in the NCHW layout and thus cannot be implemented in the middle phase. -2. Transformations that replace nodes corresponding to internal Model Optimizer operations with nodes corresponding to the :doc:`opset <../../openvino-ir-format/operation-sets/available-opsets>` operations. -3. Transformations that normalize operations inputs according to the specification. -4. Final optimization transformations. - -A graph structure during the back phase is the same as during the middle phase. There is no difference in writing middle -and back transformations. - -More information on how to develop back transformations and dedicated API description is provided in the -:ref:`Back Phase Transformations `. - -Intermediate Representation Emitting -#################################### - -The last phase of a model conversion is the Intermediate Representation emitting. Model Optimizer performs the following -steps: - -1. Iterates over all operation nodes in the graph and checks that all nodes have the ``type`` attribute set. This attribute defines the operation type and is used in the OpenVINO to instantiate proper operation from the :doc:`opset <../../openvino-ir-format/operation-sets/available-opsets>` specified in the ``version`` attribute of the node. If a node does not have attribute ``type`` or its value is equal to ``None``, Model Optimizer exits with an error. -2. Performs type inference of graph operations similar to the shape inference. 
Inferred data types are saved to a port attributes in the IR. -3. Performs topological sort of the graph and changes ``id`` attribute of all operation nodes to be sequential integer values starting from 0. -4. Saves all Constants values to the ``.bin`` file. Constants with the same value are shared among different operations. -5. Generates an ``.xml`` file defining a graph structure. The information about operation inputs and outputs are prepared uniformly for all operations regardless of their type. A list of attributes to be saved to the ``.xml`` file is defined with the ``backend_attrs()`` or ``supported_attrs()`` of the ``Op`` class used for a graph node instantiation. For more information on how the operation attributes are saved to XML, refer to the function ``prepare_emit_ir()`` in the ``mo/pipeline/common.py`` file and :doc:`Model Optimizer Operation ` article. - -==================== -Additional Resources -==================== - -* :doc:`Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™ <../../openvino-ir-format/operation-sets>` -* :doc:`Converting a Model to Intermediate Representation (IR) ` -* :doc:`OpenVINO Model Representation <../../../openvino-workflow/running-inference/integrate-openvino-with-your-application/model-representation>` -* :doc:`OpenVINO™ Extensibility Mechanism <../../openvino-extensibility>` -* :doc:`Graph Traversal and Modification Using Ports and Connections ` -* :doc:`Model Optimizer Extensions ` -* :doc:`Extending Model Optimizer with Caffe Python Layers ` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-extending-model-optimizer-with-caffe-python-layers.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-extending-model-optimizer-with-caffe-python-layers.rst deleted file mode 100644 index 4277f68139845b..00000000000000 --- 
a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-extending-model-optimizer-with-caffe-python-layers.rst +++ /dev/null @@ -1,110 +0,0 @@ -[LEGACY] Extending Model Optimizer with Caffe Python Layers -============================================================ - -.. meta:: - :description: Learn how to extract operator attributes in Model Optimizer to - support a custom Caffe operation written only in Python. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../../openvino-extensibility/frontend-extensions>` article. - -This article provides instructions on how to support a custom Caffe operation written only in Python. For example, the -`Faster-R-CNN model `__ implemented in -Caffe contains a custom proposal layer written in Python. The layer is described in the -`Faster-R-CNN prototxt `__ in the following way: - -.. code-block:: sh - - layer { - name: 'proposal' - type: 'Python' - bottom: 'rpn_cls_prob_reshape' - bottom: 'rpn_bbox_pred' - bottom: 'im_info' - top: 'rois' - python_param { - module: 'rpn.proposal_layer' - layer: 'ProposalLayer' - param_str: "'feat_stride': 16" - } - } - - -This article describes only a procedure on how to extract operator attributes in Model Optimizer. The rest of the -operation enabling pipeline and information on how to support other Caffe operations (written in C++) is described in -the :doc:`Customize Model Optimizer <../legacy-model-optimizer-extensibility>` guide. 
- -======================================== -Writing Extractor for Caffe Python Layer -======================================== - -Custom Caffe Python layers have an attribute ``type`` (defining the type of the operation) equal to ``Python`` and two -mandatory attributes ``module`` and ``layer`` in the ``python_param`` dictionary. The ``module`` defines the Python module name -with the layer implementation, while ``layer`` value is an operation type defined by a user. In order to extract -attributes for such an operation it is necessary to implement extractor class inherited from the -``CaffePythonFrontExtractorOp`` class instead of ``FrontExtractorOp`` class, used for standard framework layers. The ``op`` -class attribute value should be set to the ``module + "." + layer`` value so the extractor is triggered for this kind of -operation. - -Below is a simplified example of the extractor for the custom operation Proposal from the mentioned Faster-R-CNN model. -The full code with additional checks can be found `here `__. - -The sample code uses operation ``ProposalOp`` which corresponds to ``Proposal`` operation described in the :doc:`Available Operations Sets <../../../openvino-ir-format/operation-sets/available-opsets>` -page. For a detailed explanation of the extractor, refer to the source code below. - -.. code-block:: py - :force: - - from openvino.tools.mo.ops.proposal import ProposalOp - from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp - - - class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' # module + "." 
+ layer - enabled = True # extractor is enabled - - @staticmethod - def extract_proposal_params(node, defaults): - param = node.pb.python_param # get the protobuf message representation of the layer attributes - # parse attributes from the layer protobuf message to a Python dictionary - attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str) - update_attrs = defaults - - # the operation expects ratio and scale values to be called "ratio" and "scale" while Caffe uses different names - if 'ratios' in attrs: - attrs['ratio'] = attrs['ratios'] - del attrs['ratios'] - if 'scales' in attrs: - attrs['scale'] = attrs['scales'] - del attrs['scales'] - - update_attrs.update(attrs) - ProposalOp.update_node_stat(node, update_attrs) # update the node attributes - - @classmethod - def extract(cls, node): - # define default values for the Proposal layer attributes - defaults = { - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [0.5, 1, 2], - 'scale': [8, 16, 32], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7 - } - cls.extract_proposal_params(node, defaults) - return cls.enabled - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility <../legacy-model-optimizer-extensibility>` -* :doc:`Graph Traversal and Modification Using Ports and Connections <[legacy]-graph-traversal-and-modification>` -* :doc:`Model Optimizer Extensions <[legacy]-model-optimizer-extensions>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification.rst deleted file mode 100644 index 55b55a77335f2b..00000000000000 --- 
a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification.rst +++ /dev/null @@ -1,186 +0,0 @@ -[LEGACY] Graph Traversal and Modification -=========================================== - -.. meta:: - :description: Learn about deprecated APIs and the Port and Connection classes - in Model Optimizer used for graph traversal and transformation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../../openvino-extensibility/frontend-extensions>` article. - -There are three APIs for a graph traversal and transformation used in the Model Optimizer: - -1. The API provided with the ``networkx`` Python library for the ``networkx.MultiDiGraph`` class, which is the base class for -the ``mo.graph.graph.Graph`` object. For example, the following methods belong to this API level: - -* ``graph.add_edges_from([list])``, -* ``graph.add_node(x, attrs)``, -* ``graph.out_edges(node_id)`` -* other methods where ``graph`` is an instance of the ``networkx.MultiDiGraph`` class. - -**This is the lowest-level API. Avoid using it in the Model Optimizer transformations**. For more details, refer to the :ref:`Model Representation in Memory ` section. - -2. The API built around the ``mo.graph.graph.Node`` class. The ``Node`` class is the primary class to work with graph nodes -and their attributes.
Examples of such methods and functions are: - -* ``node.in_node(y)``, -* ``node.out_node(x)``, -* ``node.get_outputs()``, -* ``node.insert_node_after(n1, y)``, -* ``create_edge(n1, n2)`` - -**There are some "Node" class methods not recommended for use and some functions defined in the mo.graph.graph have been deprecated**. For more details, refer to the ``mo/graph/graph.py`` file. - -3. The high-level API called Model Optimizer Graph API, which uses ``mo.graph.graph.Graph``, ``mo.graph.port.Port`` and -``mo.graph.connection.Connection`` classes. For example, the following methods belong to this API level: - -* ``node.in_port(x)``, -* ``node.out_port(y)``, -* ``port.get_connection()``, -* ``connection.get_source()``, -* ``connection.set_destination(dest_port)`` - -**This is the recommended API for the Model Optimizer transformations and operations implementation**. - -The main benefit of using the Model Optimizer Graph API is that it hides some internal implementation details (the fact that -the graph contains data nodes), provides API to perform safe and predictable graph manipulations, and adds operation -semantic to the graph. This is achieved with introduction of concepts of ports and connections. - -.. note:: - This article is dedicated to the Model Optimizer Graph API only and does not cover other two non-recommended APIs. - -.. _mo_intro_ports: - -===== -Ports -===== - -An operation semantic describes how many inputs and outputs the operation has. 
For example, -:doc:`Parameter <../../../openvino-ir-format/operation-sets/operation-specs/infrastructure/parameter-1>` and :doc:`Const <../../../openvino-ir-format/operation-sets/operation-specs/infrastructure/constant-1>` operations have no -inputs and have one output, :doc:`ReLU <../../../openvino-ir-format/operation-sets/operation-specs/activation/relu-1>` operation has one input and one output, -:doc:`Split <../../../openvino-ir-format/operation-sets/operation-specs/movement/split-1>` operation has 2 inputs and a variable number of outputs depending on the value of the -attribute ``num_splits``. - -Each operation node in the graph (an instance of the ``Node`` class) has 0 or more input and output ports (instances of -the ``mo.graph.port.Port`` class). The ``Port`` object has several attributes: - -* ``node`` - the instance of the ``Node`` object the port belongs to. -* ``idx`` - the port number. Input and output ports are numbered independently, starting from ``0``. Thus, - :doc:`ReLU <../../../openvino-ir-format/operation-sets/operation-specs/activation/relu-1>` operation has one input port (with index ``0``) and one output port (with index ``0``). -* ``type`` - the type of the port. Could be equal to either ``"in"`` or ``"out"``. -* ``data`` - the object that should be used to get attributes of the corresponding data node. This object has methods ``get_shape()`` / ``set_shape()`` and ``get_value()`` / ``set_value()`` to get/set shape/value of the corresponding data node. For example, ``in_port.data.get_shape()`` returns an input shape of a tensor connected to input port ``in_port`` (``in_port.type == 'in'``), ``out_port.data.get_value()`` returns a value of a tensor produced from output port ``out_port`` (``out_port.type == 'out'``). - -.. note:: - Functions ``get_shape()`` and ``get_value()`` return ``None`` until the partial inference phase. For more information about model conversion phases, refer to the :ref:`Model Conversion Pipeline `. 
For information about partial inference phase, see the :ref:`Partial Inference `. - -There are several methods of the ``Node`` class to get the instance of a corresponding port: - -* ``in_port(x)`` and ``out_port(x)`` to get the input/output port with number ``x``. -* ``in_ports()`` and ``out_ports()`` to get a dictionary, where key is a port number and the value is the corresponding input/output port. - -Attributes ``in_ports_count`` and ``out_ports_count`` of the ``Op`` class instance define default number of input and output -ports to be created for the ``Node``. However, additional input/output ports can be added using methods -``add_input_port()`` and ``add_output_port()``. Port also can be removed, using the ``delete_input_port()`` and -``delete_output_port()`` methods. - -The ``Port`` class is just an abstraction that works with edges incoming/outgoing to/from a specific ``Node`` instance. For -example, output port with ``idx = 1`` corresponds to the outgoing edge of a node with an attribute ``out = 1``, the input -port with ``idx = 2`` corresponds to the incoming edge of a node with an attribute ``in = 2``. - -Consider the example of a graph part with 4 operation nodes "Op1", "Op2", "Op3", and "Op4" and a number of data nodes -depicted with light green boxes. - -.. image:: ../../../../assets/images/MO_ports_example_1.svg - :scale: 80 % - :align: center - -Operation nodes have input ports (yellow squares) and output ports (light purple squares). Input port may not be -connected. For example, the input **port 2** of node **Op1** does not have incoming edge, while output port always has an -associated data node (after the partial inference when the data nodes are added to the graph), which may have no -consumers. - -Ports can be used to traverse a graph. The method ``get_source()`` of an input port returns an output port producing the -tensor consumed by the input port. 
It is important that the method works the same during front, middle and back phases of a -model conversion even though the graph structure changes (there are no data nodes in the graph during the front phase). - -Let's assume that there are 4 instances of ``Node`` object ``op1, op2, op3``, and ``op4`` corresponding to nodes **Op1**, **Op2**, -**Op3**, and **Op4**, respectively. The result of ``op2.in_port(0).get_source()`` and ``op4.in_port(1).get_source()`` is the -same object ``op1.out_port(1)`` of type ``Port``. - -The method ``get_destination()`` of an output port returns the input port of the node consuming this tensor. If there are -multiple consumers of this tensor, the error is raised. The method ``get_destinations()`` of an output port returns a -list of input ports consuming the tensor. - -The method ``disconnect()`` removes a node incoming edge corresponding to the specific input port. The method removes -several edges if it is applied during the front phase for a node output port connected with multiple nodes. - -The method ``port.connect(another_port)`` connects output port ``port`` and input port ``another_port``. The method handles -situations when the graph contains data nodes (middle and back phases) and does not create an edge between two nodes -but also automatically creates data node or reuses existing data node. If the method is used during the front phase and -data nodes do not exist, the method creates edge and properly sets ``in`` and ``out`` edge attributes. - -For example, applying the following two methods to the graph above will result in the graph depicted below: - -.. code-block:: py - :force: - - op4.in_port(1).disconnect() - op3.out_port(0).connect(op4.in_port(1)) - -.. image:: ../../../../assets/images/MO_ports_example_2.svg - :scale: 80 % - :align: center - -.. 
note:: - For a full list of available methods, refer to the ``Node`` class implementation in the ``mo/graph/graph.py`` and ``Port`` class implementation in the ``mo/graph/port.py`` files. - -=========== -Connections -=========== - -Connection is a concept introduced to easily and reliably perform graph modifications. Connection corresponds to a -link between a source output port with one or more destination input ports or a link between a destination input port -and source output port producing data. So each port is connected with one or more ports with help of a connection. -Model Optimizer uses the ``mo.graph.connection.Connection`` class to represent a connection. - -There is only one ``get_connection()`` method of the ``Port`` class to get the instance of the corresponding ``Connection`` -object. If the port is not connected, the returned value is ``None``. - -For example, the ``op3.out_port(0).get_connection()`` method returns a ``Connection`` object encapsulating edges from node -**Op3** to data node **data_3_0** and two edges from data node **data_3_0** to two ports of the node **Op4**. - -The ``Connection`` class provides methods to get source and destination(s) ports the connection corresponds to: - -* ``connection.get_source()`` - returns an output ``Port`` object producing the tensor. -* ``connection.get_destinations()`` - returns a list of input ``Port`` consuming the data. -* ``connection.get_destination()`` - returns a single input ``Port`` consuming the data. If there are multiple consumers, the exception is raised. - -The ``Connection`` class provides methods to modify a graph by changing a source or destination(s) of a connection. For -example, the function call ``op3.out_port(0).get_connection().set_source(op1.out_port(0))`` changes source port of edges -consuming data from port ``op3.out_port(0)`` to ``op1.out_port(0)``. The transformed graph from the sample above is depicted -below: - -.. 
image:: ../../../../assets/images/MO_connection_example_1.svg - :scale: 80 % - :align: center - -Another example is the ``connection.set_destination(dest_port)`` method. It disconnects ``dest_port`` and all input ports to which -the connection is currently connected and connects the connection source port to ``dest_port``. - -Note that connection works seamlessly during front, middle, and back phases and hides the fact that the graph structure is -different. - -.. note:: - For a full list of available methods, refer to the ``Connection`` class implementation in the ``mo/graph/connection.py`` file. - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility <../legacy-model-optimizer-extensibility>` -* :doc:`Model Optimizer Extensions <[legacy]-model-optimizer-extensions>` -* :doc:`Extending Model Optimizer with Caffe Python Layers <[legacy]-extending-model-optimizer-with-caffe-python-layers>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions.rst deleted file mode 100644 index db252965cb84e9..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions.rst +++ /dev/null @@ -1,60 +0,0 @@ -[LEGACY] Model Optimizer Extensions -===================================== - -.. meta:: - :description: Learn about deprecated extensions, which enable injecting logic - to the model conversion pipeline without changing the Model - Optimizer core code. - -.. 
toctree:: - :maxdepth: 1 - :hidden: - - [legacy]-model-optimizer-extensions/[legacy]-model-optimizer-operation - [legacy]-model-optimizer-extensions/[legacy]-optimizer-extractor - [legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../../openvino-extensibility/frontend-extensions>` article. - -Model Optimizer extensions enable you to inject some logic to the model conversion pipeline without changing the Model -Optimizer core code. There are three types of the Model Optimizer extensions: - -1. :doc:`Model Optimizer operation <[legacy]-model-optimizer-extensions/[legacy]-model-optimizer-operation>`. -2. A :doc:`framework operation extractor <[legacy]-model-optimizer-extensions/[legacy]-optimizer-extractor>`. -3. A :doc:`model transformation <[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions>`, which can be executed during front, middle or back phase of the model conversion. - -An extension is just a plain text file with a Python code. The file should contain a class (or classes) inherited from -one of extension base classes. Extension files should be saved to a directory with the following structure: - -.. code-block:: sh - - .// - ops/ - custom operations - front/ - framework independent front transformations - / - front transformations for models only and extractors for operations - / - front transformations for models only and extractors for operations - ... 
- middle/ - middle transformations - back/ - back transformations - -Model Optimizer uses the same layout internally to keep built-in extensions. The only exception is that the -``mo/ops/`` directory is also used as a source of the Model Optimizer operations due to historical reasons. - -.. note:: - The name of a root directory with extensions should not be equal to "extensions" because it will result in a name conflict with the built-in Model Optimizer extensions. - -.. note:: - Model Optimizer itself is built by using these extensions, so there is a huge number of examples of their usage in the Model Optimizer code. - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility <../legacy-model-optimizer-extensibility>` -* :doc:`Graph Traversal and Modification Using Ports and Connections <[legacy]-graph-traversal-and-modification>` -* :doc:`Extending Model Optimizer with Caffe Python Layers <[legacy]-extending-model-optimizer-with-caffe-python-layers>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions.rst deleted file mode 100644 index 95f722ee063443..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-graph-transformation-extensions.rst +++ /dev/null @@ -1,605 +0,0 @@ -[LEGACY] Graph Transformation Extensions -========================================== - -.. meta:: - :description: Learn about various base classes for front, middle and back phase - transformations applied during model conversion with Model Optimizer. - -.. 
danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../../../openvino-extensibility/frontend-extensions>` article. - -Model Optimizer provides various base classes to implement :ref:`Front Phase Transformations `, -:ref:`Middle Phase Transformations `, and :ref:`Back Phase Transformations `. -All classes have the following common class attributes and methods: - -1. The ``enabled`` attribute specifies whether the transformation is enabled or not. The value can be changed during runtime to enable or disable execution of the transformation during a model conversion. Default value is ``True``. -2. The ``id`` attribute specifies a unique transformation string identifier. This transformation identifier can be used to enable (disable) the transformation by setting environment variable ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) with a comma separated list of ``ids``. The environment variables override the value of the ``enabled`` attribute of the transformation. Instead of using ``id`` attribute value you can add fully defined class name to ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) variable, ``extensions.back.NormalizeToNormalizeL2.NormalizeToNormalizeL2`` for example. It is an optional attribute. -3. The ``run_not_recursively`` attribute specifies whether the transformation should be executed in the sub-graphs, for example, body of the :doc:`TensorIterator <../../../../openvino-ir-format/operation-sets/operation-specs/infrastructure/tensor-iterator-1>` and the :doc:`Loop <../../../../openvino-ir-format/operation-sets/operation-specs/infrastructure/loop-5>`.
Default value is ``True``. -4. The ``force_clean_up`` attribute specifies whether the graph clean up should be executed after the transformation. The graph cleanup removes nodes of the graph not reachable from the model inputs. Default value is ``False``. -5. The ``force_shape_inference`` attribute specifies whether the nodes marked with ``need_shape_inference`` attribute equal to ``True`` should be re-inferred after the transformation. Model Optimizer sets this attribute automatically for nodes, input(s) of which were changed during the transformation, or you can set this attribute manually in the transformation for the specific nodes. Default value is ``False``. -6. Attribute ``graph_condition`` specifies a list of functions with one parameter -- ``Graph`` object. The transformation is executed if and only if all functions return ``True``. If the attribute is not set, no check is performed. -7. Method ``run_before()`` returns a list of transformation classes which this transformation should be executed before. -8. Method ``run_after()`` returns a list of transformation classes which this transformation should be executed after. - -.. note:: - Some of the transformation types have specific class attributes and methods, which are explained in the corresponding sections of this document. - -Model Optimizer builds a graph of dependencies between registered transformations and executes them in the topological -order. To execute the transformation during a proper model conversion phase, Model Optimizer defines several -anchor transformations that do nothing. All transformations are ordered with respect to these anchor transformations. -The diagram below shows anchor transformations, some of built-in transformations and dependencies between them: - -.. 
image:: ../../../../../assets/images/MO_transformations_graph.svg - -User-defined transformations are executed after the corresponding ``Start`` and before the corresponding ``Finish`` anchor -transformations by default (if ``run_before()`` and ``run_after()`` methods have not been overridden). - -.. note:: - The ``PreMiddleStart`` and ``PostMiddleStart`` anchors were introduced due to historical reasons to refactor the Model Optimizer pipeline, which initially had a hardcoded order of transformations. - -.. _mo_front_phase_transformations: - -=========================== -Front Phase Transformations -=========================== - -There are several types of a front phase transformation: - -1. :ref:`Pattern-Defined Front Phase Transformations ` triggered for each sub-graph of the original graph isomorphic to the specified pattern. -2. :ref:`Specific Operation Front Phase Transformations ` triggered for the node with a specific ``op`` attribute value. -3. :ref:`Generic Front Phase Transformations `. -4. Manually enabled transformation, defined with a JSON configuration file (for TensorFlow, ONNX, and PaddlePaddle models), specified using the ``--transformations_config`` command-line parameter: - - 1. :ref:`Node Name Pattern Front Phase Transformations `. - 2. :ref:`Front Phase Transformations Using Start and End Points `. - 3. :ref:`Generic Front Phase Transformations Enabled with Transformations Configuration File `. - -.. _pattern_defined_front_phase_transformations: - -Pattern-Defined Front Phase Transformations -########################################### - -This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` and -``mo.front.common.replacement.FrontReplacementPattern`` as base classes and works as follows: - -1. Define a sub-graph to be matched, using a list of nodes with attributes and edges connecting them (edges may also have attributes). -2. 
1. Override the ``replace_sub_graph(self, graph, match)`` method. In this case, Model Optimizer only executes the overridden function, passing the ``graph`` object and a dictionary describing the matched sub-graph. You are required to write the transformation and connect the newly created nodes to the rest of the graph. - 2. Override the ``generate_sub_graph(self, graph, match)`` method. This case is not recommended for use because it is the most complicated approach. It can be effectively replaced with one of the two previous approaches.
This dictionary usually contains attributes like ``in`` and ``out``, defining input and output ports. - -Consider the example of a front transformation implemented in the ``extensions/front/Mish_fusion.py`` file performing -fusing of the sub-graph defining the :doc:`Mish <../../../../openvino-ir-format/operation-sets/operation-specs/activation/mish-4>` activation function into a single -operation: - -.. code-block:: py - :force: - - from openvino.tools.mo.front.Softplus_fusion import SoftplusFusion - from openvino.tools.mo.ops.activation_ops import Mish - from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph - from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch - from openvino.tools.mo.graph.graph import Graph, rename_nodes - - - class MishFusion(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with Softplus defining the Mish function: Mish(x) = x * tanh(SoftPlus(x)). - """ - enabled = True # Transformation is enabled. - - def run_after(self): # Run this transformation after "SoftplusFusion" transformation. - return [SoftplusFusion] - - def pattern(self): # Define pattern according to formulae x * tanh(SoftPlus(x)). - return dict( - nodes=[ - ('mul', dict(op='Mul')), - ('tanh', dict(op='Tanh')), - ('softplus', dict(op='SoftPlus')), - ], - edges=[ - ('softplus', 'tanh'), - ('tanh', 'mul'), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): # Entry point for the transformation. - mul = match['mul'] # Get the Node corresponding to matched "mul" node. - mul_name = mul.soft_get('name', mul.id) - softplus = match['softplus'] # Get the Node corresponding to the matched "softplus" node. - - # Determine the input port of Mul which gets the 'input' node output. - input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') == 'Tanh') - - # Check that the same tensor is provided as input to Mul and SoftPlus. 
1. Override the ``replace_sub_graph(self, graph, match)`` method. In this case, Model Optimizer only executes the overridden function, passing the ``graph`` object and a dictionary with a single key ``op``, whose value is the matched node. You are required to write the transformation and connect the newly created nodes to the rest of the graph. - 2. Override the ``replace_op(self, graph, node)`` method. In this case, Model Optimizer executes the overridden function, passing the ``graph`` object and the matched node as the ``node`` parameter. If the function returns an ``id`` of some node, then the ``Node`` with this ``id`` is connected to the consumers of the matched node.
After applying the transformation, the matched node is removed from the graph. - -The ``FrontReplacementOp`` class provides a simpler mechanism to match a single operation with specific value of the ``op`` -(write the ``op`` attribute in the class instead of defining a ``pattern()`` function) attribute and perform the -transformation. - -Consider an example transformation from the ``extensions/front/Pack.py`` file, which replaces ``Pack`` operation from -the TensorFlow: - -.. code-block:: py - :force: - - from openvino.tools.mo.front.common.partial_infer.utils import int64_array - from openvino.tools.mo.front.common.replacement import FrontReplacementOp - from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs - from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes - from openvino.tools.mo.ops.concat import Concat - from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - - class Pack(FrontReplacementOp): - op = "Pack" # Trigger transformation for all nodes in the graph with the op = "Pack" attribute - enabled = True # Transformation is enabled. - - def replace_op(self, graph: Graph, node: Node): # Entry point for the transformation. - # Create a Concat operation with a number of inputs equal to a number of inputs to Pack. - out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node() - pack_name = node.soft_get('name', node.id) - - for ind in node.in_ports(): - # Add dimension of size 1 to all inputs of the Pack operation and add them as Concat inputs. - unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])}, - {'name': node.soft_get('name', node.id) + '/Unsqueeze'}) - node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0)) - unsqueeze_node.out_port(0).connect(out_node.in_port(ind)) - - # Rename the created Concat operation to have the name of the "pack" node, which produced the value equal to the - # Concat output. 
- rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)]) - return [out_node.id] # Reconnect the Pack operation consumers to get input from Concat instead. - - -.. _generic_front_phase_transformations: - -Generic Front Phase Transformations -################################### - -Model Optimizer provides a mechanism to implement generic front phase transformation. This type of transformation is -implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` or -``mo.front.common.replacement.FrontReplacementPattern`` as base classes. Make sure the transformation is enabled before trying to execute it. -Then, Model Optimizer executes the ``find_and_replace_pattern(self, graph)`` method and -provides a ``Graph`` object as an input. - -Consider the example of a generic front transformation from the ``extensions/front/SqueezeNormalize.py`` file performing -normalization of the :doc:`Squeeze <../../../../openvino-ir-format/operation-sets/operation-specs/shape/squeeze-1>` operation. Older version of the operation had a list of -axes to squeeze as an attribute, but now it is a separate input. For backward compatibility, the Model Optimizer -operation supports both semantics. Before IR generation, however, the operation should be normalized according to the -specification. - -.. code-block:: py - :force: - - import logging as log - - from openvino.tools.mo.front.common.partial_infer.utils import int64_array - from openvino.tools.mo.front.common.replacement import FrontReplacementPattern - from openvino.tools.mo.graph.graph import Graph - from openvino.tools.mo.ops.const import Const - from openvino.tools.mo.utils.error import Error - - - class SqueezeNormalize(FrontReplacementPattern): - """ - Normalizes inputs of the Squeeze layers. The layers should have two inputs: the input with data and input with the - dimensions to squeeze. If the second input is omitted then all dimensions of size 1 should be removed. 
- """ - enabled = True # The transformation is enabled. - - def find_and_replace_pattern(self, graph: Graph): # The function is called unconditionally. - for squeeze_node in graph.get_op_nodes(op='Squeeze'): # Iterate over all nodes with op='Squeeze'. - # If the operation has only 1 input node and no 'squeeze_dims' Node attribute, then convert the attribute to - # the operation input. - if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'): - dims_node = Const(graph, {'name': squeeze_node.id + '/Dims', - 'value': int64_array(squeeze_node.squeeze_dims)}).create_node() - squeeze_node.in_port(1).connect(dims_node.out_port(0)) - del squeeze_node['squeeze_dims'] - # If two inputs already exist, that means the operation is already normalized. - elif len(squeeze_node.in_nodes()) == 2: - log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name)) - # In all other cases, raise an error. - else: - raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and an "squeeze_dims" ' - 'attribute'.format(squeeze_node.soft_get('name'))) - -For the details on implementation and how these front phase transformations work, refer to the ``mo/front/common/replacement.py`` -file. - -.. _node_name_pattern_front_phase_transformations: - -Node Name Pattern Front Phase Transformations -############################################# - -TensorFlow uses a mechanism of scope to group related operation nodes. It is a good practice to put nodes performing -particular task into the same scope. This approach divides a graph into logical blocks that are easier to review in the -TensorBoard. The scope, in fact, just defines a common name prefix for the nodes belonging to it. - -For example, Inception topologies contain several types of so-called **Inception blocks**. Some of them are equal to each -other, but located in different places of the network. 
`TensorFlow-Slim image classification model library <https://github.com/tensorflow/models/tree/master/research/slim>`__ has
Other possible values are described in the dedicated sections below.
Each internal list describes a list of nodes consuming -this tensor and port numbers, where the tensor is consumed. Model Optimizer generates regular expressions for the input -nodes names to uniquely identify them in each instance of the sub-graph, defined by the ``instances``. Denote these nodes -as input nodes of the sub-graph. - -In the InceptionV4 topology, the ``InceptionV4/Mixed_5b`` block has four input tensors from outside of the sub-graph, -but all of them are produced by the ``InceptionV4/Mixed_5a/concat`` node. Therefore, the top-level list of the ``inputs`` -contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by -``InceptionV4/Mixed_5a/concat`` node. In this case, all four input nodes consume input tensor into "port 0". - -The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level -list is important. This order defines how Model Optimizer attaches input tensors to a new generated -node if the sub-graph is replaced with a single node. The ``i``-th input node of the sub-graph is obtained using -``match.single_input_node(i)`` call in the sub-graph transformation code. More information about API is given below. If it is -necessary to change the order of input tensors, the configuration file can be edited in the text editor. - -The value for the ``outputs`` key is a list describing nodes of the sub-graph producing tensor, that goes outside of the -sub-graph or does not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in -the list is important. The ``i``-th element of the list describes the ``i``-th output tensor of the sub-graph, which could be -obtained using ``match.output_node(i)`` call. The order of elements can be manually changed in the configuration file. -Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node. 
- -For more examples of this type of transformation, refer to the :doc:`Converting TensorFlow Object Detection API Models <../../legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection>` guide. - -.. _start_end_points_front_phase_transformations: - -Front Phase Transformations Using Start and End Points -###################################################### - -This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a -base class and works as follows: - -1. Prepare a JSON configuration file that defines the sub-graph to match, using two lists of node names: "start" and "end" nodes. -2. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file using the ``--transformations_config`` command-line parameter . Model Optimizer performs the following steps to match the sub-graph: - - 1. Starts a graph traversal from every start node following the direction of the graph edges. The search stops in an end node or in the case of a node without consumers. All visited nodes are added to the matched sub-graph. - 2. Starts another graph traversal from each non-start node of the sub-graph, i.e. every node except nodes from the "start" list. In this step, the edges are traversed in the opposite edge direction. All newly visited nodes are added to the matched sub-graph. This step is needed to add nodes required for calculation values of internal nodes of the matched sub-graph. - 3. Checks that all "end" nodes were reached from "start" nodes. If not, it exits with an error. - 4. Checks that there are no :doc:`Parameter <../../../../openvino-ir-format/operation-sets/operation-specs/infrastructure/parameter-1>` operations among added nodes. If they exist, the sub-graph depends on the inputs of the model. Such configuration is considered incorrect so Model Optimizer exits with an error. 
- -This algorithm finds all nodes "between" start and end nodes and nodes needed for calculation of non-input nodes of the -matched sub-graph. - -The example of a JSON configuration file for a transformation with start and end points is -``extensions/front/tf/ssd_support_api_v1.15.json``: - -.. code-block:: json - - [ - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "detection_boxes", - "detection_scores", - "num_detections" - ], - "start_points": [ - "Postprocessor/Shape", - "Postprocessor/scale_logits", - "Postprocessor/Tile", - "Postprocessor/Reshape_1", - "Postprocessor/Cast_1" - ] - }, - "match_kind": "points" - } - ] - -The format of the file is similar to the one provided as an example in the -:ref:`Node Name Pattern Front Phase Transformations ` section. The difference is in -the value of the ``match_kind`` parameter, which should be equal to the ``points`` and the format of the ``instances`` parameter, -which should be a dictionary with two keys ``start_points`` and ``end_points``, defining start and end node names -respectively. - -.. note:: - The ``include_inputs_to_sub_graph`` and ``include_outputs_to_sub_graph`` parameters are redundant and should be always equal to ``true``. - -.. note:: - This sub-graph match algorithm has a limitation that each start node must have only one input. Therefore, it is not possible to specify, for example, the :doc:`Convolution <../../../../openvino-ir-format/operation-sets/operation-specs/convolution/convolution-1>` node as input because it has two inputs: data tensor and tensor with weights. 
- -For other examples of transformations with points, refer to the -:doc:`Converting TensorFlow Object Detection API Models <../../legacy-conversion-api/[legacy]-supported-model-formats/[legacy]-conversion-tutorials/convert-tensorflow-object-detection>` guide. - -.. _generic_transformations_config_front_phase_transformations: - -Generic Front Phase Transformations Enabled with Transformations Configuration File -################################################################################### - -This type of transformation works similarly to the :ref:`Generic Front Phase Transformations ` -but require a JSON configuration file to enable it similarly to -:ref:`Node Name Pattern Front Phase Transformations ` and -:ref:`Front Phase Transformations Using Start and End Points `. - -The base class for this type of transformation is -``mo.front.common.replacement.FrontReplacementFromConfigFileGeneral``. Model Optimizer executes the -``transform_graph(self, graph, replacement_descriptions)`` method and provides the ``Graph`` object and dictionary with values -parsed from the `custom_attributes` attribute of the provided JSON configuration file. - -The example of the configuration file for this type of transformation is ``extensions/front/tf/yolo_v1_tiny.json``: - -.. code-block:: json - - [ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "classes": 20, - "coords": 4, - "num": 2, - "do_softmax": 0 - } - } - ] - -and the corresponding transformation file is ``./extensions/front/YOLO.py``: - -.. 
code-block:: py - :force: - - from openvino.tools.mo.front.no_op_eraser import NoOpEraser - from openvino.tools.mo.front.standalone_const_eraser import StandaloneConstEraser - from openvino.tools.mo.ops.regionyolo import RegionYoloOp - from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral - from openvino.tools.mo.graph.graph import Node, Graph - from openvino.tools.mo.ops.result import Result - from openvino.tools.mo.utils.error import Error - - - class YoloRegionAddon(FrontReplacementFromConfigFileGeneral): - """ - Replaces all Result nodes in graph with YoloRegion->Result nodes chain. - YoloRegion node attributes are taken from configuration file - """ - replacement_id = 'TFYOLO' # The identifier matching the "id" attribute in the JSON file. - - def run_after(self): - return [NoOpEraser, StandaloneConstEraser] - - def transform_graph(self, graph: Graph, replacement_descriptions): - op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result'] - for op_output in op_outputs: - last_node = Node(graph, op_output).in_node(0) - op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1) - op_params.update(replacement_descriptions) - region_layer = RegionYoloOp(graph, op_params) - region_layer_node = region_layer.create_node([last_node]) - # In here, 'axis' from 'dim_attrs' can be removed to avoid permutation from axis = 1 to axis = 2. - region_layer_node.dim_attrs.remove('axis') - Result(graph).create_node([region_layer_node]) - graph.remove_node(op_output) - -The configuration file has only 3 parameters: ``id`` identifier of the transformation , ``match_kind`` (which should be equal -to ``general``) and the ``custom_attributes`` dictionary with custom attributes accessible in the transformation. - -.. _mo_middle_phase_transformations: - -============================ -Middle Phase Transformations -============================ - -There are two types of middle phase transformations: - -1. 
works similarly to the :ref:`Pattern-Defined Front Phase Transformations `. -There are two differences:
This type of transformation is implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and
- -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility <../../legacy-model-optimizer-extensibility>` -* :doc:`Graph Traversal and Modification Using Ports and Connections <../../legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification>` -* :doc:`Model Optimizer Extensions <../[legacy]-model-optimizer-extensions>` -* :doc:`Extending Model Optimizer with Caffe Python Layers <../[legacy]-extending-model-optimizer-with-caffe-python-layers>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-model-optimizer-operation.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-model-optimizer-operation.rst deleted file mode 100644 index 61c43f72dfade9..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-model-optimizer-operation.rst +++ /dev/null @@ -1,110 +0,0 @@ -[LEGACY] Model Optimizer Operation -=================================== - -.. meta:: - :description: Learn about the Op class, that contains operation attributes, - which are set to a node of the graph created during model - conversion with Model Optimizer. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../../../openvino-extensibility/frontend-extensions>` article. 
manipulations are performed with graph ``Nodes`` and their attributes and do not involve ``Ops``.
If not specified, Model Optimizer sets it equal to ``experimental``. For more information about operation sets, refer to :doc:`OpenVINO Model Representation <../../../../../openvino-workflow/running-inference/integrate-openvino-with-your-application/model-representation>` section. -* ``op`` — Model Optimizer type of the operation. In many cases, the value of ``type`` is equal to the value of ``op``. However, when Model Optimizer cannot instantiate the opset operation during model loading, it creates an instance of an internal operation. Thus, the attribute ``op`` is used as a type of this internal operation. Later in the pipeline, the node created from an internal operation will be replaced during front, middle or back phase with node(s) created from the opset. -* ``infer`` — the attribute defines a function calculating output tensor(s) shape and optional value(s). The attribute may be set to ``None`` for the internal Model Optimizer operations used during the front phase only. For more information about the shape inference function, refer to the :ref:`Partial Inference `. -* ``type_infer`` — the attribute defines a function calculating output tensor(s) data type. If the attribute is not defined, the default function is used. The function checks if the ``data_type`` node attribute is set and then propagates this type to the output tensor from the **port 0**. Otherwise, it propagates the data type of the tensor coming into the input **port 0** to the output tensor from the **port 0**. -* ``in_ports_count`` — default number of input ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods. -* ``out_ports_count`` — default number of output ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods. 
- -Below is an example of the Model Optimizer class for the :doc:`SoftMax <../../../../openvino-ir-format/operation-sets/operation-specs/activation/softmax-1>` operation from -the ``mo/ops/softmax.py`` file with the comments in code. - -.. code-block:: py - - class Softmax(Op): - # The class attribute defines a name of the operation so the operation class can be obtained using the - # "Op.get_op_class_by_name()" static method - op = 'SoftMax' - - # The operation works as an extractor by default. This is a legacy behavior, currently not recommended for use, - # thus "enabled" class attribute is set to False. The recommended approach is to use dedicated extractor extension. - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { # The constructor of the base class Op is called with additional default attributes. - 'type': __class__.op, # The operation is from the opset so the type is set to 'SoftMax'. - 'op': __class__.op, # Internal Model Optimizer operation has the same type. - 'version': 'opset1', # The operation corresponds to opset1. - 'infer': Softmax.infer, # Shape inference function is defined below. - 'axis': 1, # Default value for the "axis" attribute of the operation SoftMax. - 'in_ports_count': 1, # The operation has one input. - 'out_ports_count': 1, # The operation produces one output. - }, attrs) - - # The method returns operation specific attributes list. This method is important when implementing - # extractor inherited from CaffePythonFrontExtractorOp class to extract attribute for Caffe Python operation. - # However, it is currently used interchangeably with the "backend_attrs()" method. If the "backend_attrs()" is not used, - # then the "supported_attrs()" is used instead. In this particular case, the operation has just one attribute "axis". 
- def supported_attrs(self): - return ['axis'] - - @staticmethod - def infer(node: Node): - "some code calculating output shape and values" - -There is a dedicated method called ``backend_attrs()`` defining a list of attributes to be saved to the IR. Consider an -example from the ``mo/ops/pooling.py`` file: - -.. code-block:: py - - def backend_attrs(self): - return [ - ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))), - ('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))), - - ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))), - ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))), - - ('pool-method', 'pool_method'), - ('exclude-pad', 'exclude_pad'), - - 'rounding_type', - 'auto_pad', - ] - -The ``backend_attrs()`` function returns a list of records. A record can be of one of the following formats: -1. A string defining the attribute to be saved to the IR. If the value of the attribute is ``None``, the attribute is not saved. Examples of this case are ``rounding_type`` and ``auto_pad``. -2. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is a function to produce the value for this attribute. The function gets an instance of the ``Node`` as the only parameter and returns a string with the value to be saved to the IR. Examples of this case are ``strides``, ``kernel``, ``pads_begin`` and ``pads_end``. -3. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is the name of the ``Node`` attribute to get the value from. Examples of this case are ``pool-method`` and ``exclude-pad``. 
- -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility <../../legacy-model-optimizer-extensibility>` -* :doc:`Graph Traversal and Modification Using Ports and Connections <../../legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification>` -* :doc:`Model Optimizer Extensions <../[legacy]-model-optimizer-extensions>` -* :doc:`Extending Model Optimizer with Caffe Python Layers <../[legacy]-extending-model-optimizer-with-caffe-python-layers>` - diff --git a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-optimizer-extractor.rst b/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-optimizer-extractor.rst deleted file mode 100644 index 5de7ae93f86a7c..00000000000000 --- a/docs/articles_en/documentation/legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility/[legacy]-model-optimizer-extensions/[legacy]-optimizer-extractor.rst +++ /dev/null @@ -1,113 +0,0 @@ -[LEGACY] Operation Extractor -============================= - -.. meta:: - :description: Learn about a deprecated generic extension in Model Optimizer, - which provides the operation extractor usable for all model - frameworks. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <../../../../openvino-extensibility/frontend-extensions>` article. 
- -Model Optimizer runs specific extractor for each operation in the model during the model loading. - -There are several types of Model Optimizer extractor extensions: - -1. The generic one, which is described in this article. -2. The special extractor for Caffe models with Python layers. This kind of extractor is described in the :doc:`Extending Model Optimizer with Caffe Python Layers <../[legacy]-extending-model-optimizer-with-caffe-python-layers>` guide. - -Generic extension provides a generic mechanism for the operation extractor applicable for all frameworks. Model Optimizer provides the ``mo.front.extractor.FrontExtractorOp`` class as a base class to implement the extractor. It has the ``extract`` class method, which gets the only parameter ``Node``, which corresponds to the graph node to extract data from. The operation description in the original framework format is stored in the attribute ``pb`` of the node. The extractor goal is to parse this attribute and save necessary attributes to the corresponding node of the graph. Consider the extractor for the ``Const`` TensorFlow operation (refer to the ``extensions/front/tf/const_ext.py`` file): - -.. code-block:: py - :force: - - from openvino.tools.mo.front.extractor import FrontExtractorOp - from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, tf_tensor_content - from openvino.tools.mo.ops.const import Const - - - class ConstExtractor(FrontExtractorOp): - # The "op" class attribute defines a type of the operation in the framework (in this case it is a TensorFlow), - # for which the extractor should be triggered. - op = 'Const' - enabled = True # The flag that indicates that this extractor is enabled. - - @classmethod - def extract(cls, node): # The entry point of the extractor. - # The `node.pb` attribute stores the TensorFlow representation of the operation, which is a Protobuf message of the - # specific format. 
In particular, the message contains the attribute called "value" containing the description of - # the constant. The string "pb.attr["value"].tensor" is just a Python binding for Protobuf message parsing. - pb_tensor = node.pb.attr["value"].tensor - # Get the shape of the tensor from the protobuf message, using the helper function "tf_tensor_shape". - shape = tf_tensor_shape(pb_tensor.tensor_shape) - # Create a dictionary with necessary attributes. - attrs = { - 'shape': shape, - # Get the tensor value, using "tf_tensor_content" helper function. - 'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor), - # Get the tensor data type, using "tf_dtype_extractor" helper function. - 'data_type': tf_dtype_extractor(pb_tensor.dtype), - } - # Update the node attributes, using default attributes from the "Const" operation and attributes saved to the - # "attrs" dictionary. - Const.update_node_stat(node, attrs) - return cls.enabled - -Consider another example with an extractor of the ``Constant`` ONNX operation (refer to the ``extensions/front/onnx/const_ext.py`` file): - -.. code-block:: py - :force: - - from onnx import numpy_helper - from onnx.numpy_helper import to_array - - from openvino.tools.mo.front.extractor import FrontExtractorOp - from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - from openvino.tools.mo.ops.const import Const - - - class ConstantExtractor(FrontExtractorOp): - op = 'Constant' - enabled = True - - @classmethod - def extract(cls, node): - # Use "onnx_attr" helper method, which parses the Protobuf representation of the operation saved in the "node". - # Gets the value of the attribute with name "value" as "TensorProto" type (specified with a keyword "t"). - pb_value = onnx_attr(node, 'value', 't') - # Use "numpy_helper.to_array()" ONNX helper method to convert "TensorProto" object to a numpy array. 
- value = numpy_helper.to_array(pb_value) - - attrs = { - 'data_type': value.dtype, - 'value': value, - } - # Update the node attributes, using default attributes from the "Const" operation and attributes saved to the - # "attrs" dictionary. - Const.update_node_stat(node, attrs) - return cls.enabled - -The extractors for operations from different frameworks work similarly. The only difference is in the helper methods used to parse operation attributes encoded with a framework-specific representation. - -A common practice is to use ``update_node_stat()`` method of the dedicated ``Op`` class to update the node attributes. This method does the following: - -1. Sets values for common attributes like ``op``, ``type``, ``infer``, ``in_ports_count``, ``out_ports_count``, ``version`` to values specific to the dedicated operation (``Const`` operation in this case). -2. Uses ``supported_attrs()`` and ``backend_attrs()`` methods, defined in the ``Op`` class to update specific node attribute ``IE``. The IR emitter uses the value stored in the ``IE`` attribute to pre-process attribute values and save them to IR. -3. Optionally sets additional attributes provided to the ``update_node_stat()`` function as a second parameter. Usually these attributes are parsed from the particular instance of the operation. - -.. note:: - Model Optimizer uses numpy arrays to store values and numpy arrays of ``np.int64`` type to store shapes in the graph. 
- -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility <../../legacy-model-optimizer-extensibility>` -* :doc:`Graph Traversal and Modification Using Ports and Connections <../../legacy-model-optimizer-extensibility/[legacy]-graph-traversal-and-modification>` -* :doc:`Model Optimizer Extensions <../[legacy]-model-optimizer-extensions>` -* :doc:`Extending Model Optimizer with Caffe Python Layers <../[legacy]-extending-model-optimizer-with-caffe-python-layers>` - diff --git a/docs/articles_en/documentation/openvino-ecosystem.rst b/docs/articles_en/documentation/openvino-ecosystem.rst index fe4f203428a865..1975fe0a48a181 100644 --- a/docs/articles_en/documentation/openvino-ecosystem.rst +++ b/docs/articles_en/documentation/openvino-ecosystem.rst @@ -116,15 +116,6 @@ generative AI and vision models directly on your computer or edge device using O :bdg-link-success:`User Guide ` OpenVINO Tokenizers add text processing operations to OpenVINO. -|hr| - - -| **OpenVINO's Open Model Zoo** -| :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` - -Open Model Zoo includes optimized deep learning models and a set of demos to -expedite development of high-performance deep learning inference applications. OpenVINO-based AI projects ########################## diff --git a/docs/articles_en/documentation/openvino-ecosystem/openvino-security-add-on.rst b/docs/articles_en/documentation/openvino-ecosystem/openvino-security-add-on.rst index 3959ebefb09a4a..043f05a90e2342 100644 --- a/docs/articles_en/documentation/openvino-ecosystem/openvino-security-add-on.rst +++ b/docs/articles_en/documentation/openvino-ecosystem/openvino-security-add-on.rst @@ -735,7 +735,7 @@ How to Use the OpenVINO™ Security Add-on This section requires interactions between the Model Developer/Independent Software vendor and the User. All roles must complete all applicable :ref:`set up steps ` and :ref:`installation steps ` before beginning this section. 
-This document uses the `face-detection-retail-0004 `__ model as an example. +This document uses a face-detection model as an example. The following figure describes the interactions between the Model Developer, Independent Software Vendor, and User. @@ -793,15 +793,8 @@ Step 2: Create a key store and add a certificate to it Step 3: Create the model ------------------------ -This example uses ``curl`` to download the ``face-detection-retail-004`` model from the OpenVINO Model Zoo. If you are behind a firewall, check and set your proxy settings. - -Download a model from the Model Zoo: - -.. code-block:: sh - - curl --create-dirs https://download.01.org/opencv/2021/openvinotoolkit/2021.1/open_../legacy-features/model-zoo/models_bin/1/face-detection-retail-0004/FP32/face-detection-retail-0004.xml https://download.01.org/opencv/2021/openvinotoolkit/2021.1/open_../legacy-features/model-zoo/models_bin/1/face-detection-retail-0004/FP32/face-detection-retail-0004.bin -o model/face-detection-retail-0004.xml -o model/face-detection-retail-0004.bin - -The model is downloaded to the ``OVSA_DEV_ARTEFACTS/model`` directory +Download a `model `__ in OpenVINO IR format to +the ``OVSA_DEV_ARTEFACTS/model`` directory. Step 4: Define access control for the model and create a master license for it ------------------------------------------------------------------------------- @@ -811,9 +804,9 @@ Define and enable the model access control and master license: .. 
 code-block:: sh uuid=$(uuidgen) - /opt/ovsa/bin/ovsatool controlAccess -i model/face-detection-retail-0004.xml model/face-detection-retail-0004.bin -n "face detection" -d "face detection retail" -v 0004 -p face_detection_model.dat -m face_detection_model.masterlic -k isv_keystore -g $uuid + /opt/ovsa/bin/ovsatool controlAccess -i model/.xml model/.bin -n "name of the model" -d "detailed name of the model" -p .dat -m .masterlic -k isv_keystore -g $uuid -The Intermediate Representation files for the ``face-detection-retail-0004`` model are encrypted as ``face_detection_model.dat`` and a master license is generated as ``face_detection_model.masterlic`` +The Intermediate Representation files for the model are encrypted as ``.dat`` and a master license is generated as ``.masterlic`` Step 5: Create a Runtime Reference TCB -------------------------------------- @@ -824,7 +817,7 @@ Generate the reference TCB for the runtime .. code-block:: sh - /opt/ovsa/bin/ovsaruntime gen-tcb-signature -n "Face Detect @ Runtime VM" -v "1.0" -f face_detect_runtime_vm.tcb -k isv_keystore + /opt/ovsa/bin/ovsaruntime gen-tcb-signature -n "Face Detect @ Runtime VM" -v "1.0" -f model_inference_runtime_vm.tcb -k isv_keystore Step 6: Publish the access controlled Model and Runtime Reference TCB @@ -856,7 +849,7 @@ Step 7: Receive a User Request .. code-block:: sh cd $OVSA_DEV_ARTEFACTS - /opt/ovsa/bin/ovsatool sale -m face_detection_model.masterlic -k isv_keystore -l 30daylicense.config -t face_detect_runtime_vm.tcb -p custkeystore.csr.crt -c face_detection_model.lic + /opt/ovsa/bin/ovsatool sale -m .masterlic -k isv_keystore -l 30daylicense.config -t model_inference_runtime_vm.tcb -p custkeystore.csr.crt -c .lic 4. Update the license server database with the license. ..
code-block:: sh cd /opt/ovsa/DB - python3 ovsa_store_customer_lic_cert_db.py ovsa.db $OVSA_DEV_ARTEFACTS/face_detection_model.lic $OVSA_DEV_ARTEFACTS/custkeystore.csr.crt + python3 ovsa_store_customer_lic_cert_db.py ovsa.db $OVSA_DEV_ARTEFACTS/.lic $OVSA_DEV_ARTEFACTS/custkeystore.csr.crt 5. Provide these files to the User: - * ``face_detection_model.dat`` - * ``face_detection_model.lic`` + * ``.dat`` + * ``.lic`` Model User Instructions +++++++++++++++++++++++ @@ -930,14 +923,14 @@ Step 4: Receive and load the access controlled model into the OpenVINO™ Model 1. Receive the model as files named: - * face_detection_model.dat - * face_detection_model.lic + * .dat + * .lic .. code-block:: sh cd $OVSA_RUNTIME_ARTEFACTS - scp username@://OVSA/artefacts/face_detection_model.dat . - scp username@://OVSA/artefacts/face_detection_model.lic . + scp username@://OVSA/artefacts/.dat . + scp username@://OVSA/artefacts/.lic . 2. Prepare the environment: @@ -954,8 +947,8 @@ Step 4: Receive and load the access controlled model into the OpenVINO™ Model .. code-block:: sh cd $OVSA_RUNTIME_ARTEFACTS/../ovms - cp $OVSA_RUNTIME_ARTEFACTS/face_detection_model.dat model/fd/1/. - cp $OVSA_RUNTIME_ARTEFACTS/face_detection_model.lic model/fd/1/. + cp $OVSA_RUNTIME_ARTEFACTS/.dat model/fd/1/. + cp $OVSA_RUNTIME_ARTEFACTS/.lic model/fd/1/. cp $OVSA_RUNTIME_ARTEFACTS/custkeystore model/fd/1/. 4. Rename and edit ``sample.json`` to include the names of the access controlled model artefacts you received from the Model Developer. 
The file looks like this: @@ -976,7 +969,7 @@ Step 4: Receive and load the access controlled model into the OpenVINO™ Model "config":{ "name":"controlled-access-model", "base_path":"/sampleloader/model/fd", - "custom_loader_options": {"loader_name": "ovsa", "keystore": "custkeystore", "controlled_access_file": "face_detection_model"} + "custom_loader_options": {"loader_name": "ovsa", "keystore": "custkeystore", "controlled_access_file": ""} } } ] @@ -1010,7 +1003,7 @@ Step 6: Prepare to run Inference pip3 install futures==3.1.1 pip3 install tensorflow-serving-api==1.14.0 -3. Copy the ``face_detection.py`` from the example_client in ``/opt/ovsa/example_client`` +3. Copy the ``detection.py`` from the example_client in ``/opt/ovsa/example_client`` .. code-block:: sh @@ -1027,11 +1020,11 @@ Step 6: Prepare to run Inference Step 7: Run Inference --------------------- -Run the ``face_detection.py`` script: +Run the ``detection.py`` script: .. code-block:: sh - python3 face_detection.py --grpc_port 3335 --batch_size 1 --width 300 --height 300 --input_images_dir images --output_dir results --tls --server_cert /var/OVSA/Modelserver/server.pem --client_cert /var/OVSA/Modelserver/client.pem --client_key /var/OVSA/Modelserver/client.key --model_name controlled-access-model + python3 detection.py --grpc_port 3335 --batch_size 1 --width 300 --height 300 --input_images_dir images --output_dir results --tls --server_cert /var/OVSA/Modelserver/server.pem --client_cert /var/OVSA/Modelserver/client.pem --client_key /var/OVSA/Modelserver/client.key --model_name controlled-access-model Summary diff --git a/docs/articles_en/documentation/openvino-extensibility.rst b/docs/articles_en/documentation/openvino-extensibility.rst index d166f1390d643d..6b2d0878bb687c 100644 --- a/docs/articles_en/documentation/openvino-extensibility.rst +++ b/docs/articles_en/documentation/openvino-extensibility.rst @@ -32,7 +32,7 @@ Custom operations, which are not included in the list, are not recognized by 
Ope 1. A new or rarely used regular framework operation is not supported in OpenVINO yet. 2. A new user operation that was created for some specific model topology by the author of the model using framework extension capabilities. -Importing models with such operations requires additional steps. This guide illustrates the workflow for running inference on models featuring custom operations. This allows plugging in your own implementation for them. OpenVINO Extensibility API enables adding support for those custom operations and using one implementation for Model Optimizer and OpenVINO Runtime. +Importing models with such operations requires additional steps. This guide illustrates the workflow for running inference on models featuring custom operations. This allows plugging in your own implementation for them. OpenVINO Extensibility API enables adding support for those custom operations and using one implementation for model conversion API and OpenVINO Runtime. Defining a new custom operation basically consists of two parts: @@ -56,21 +56,9 @@ Mapping from Framework Operation Mapping of custom operation is implemented differently, depending on model format used for import. If a model is represented in the ONNX (including models exported from PyTorch in ONNX), TensorFlow Lite, PaddlePaddle or -TensorFlow formats, then one of the classes from :doc:`Frontend Extension API ` -should be used. It consists of several classes available in C++ which can be used with the ``--extensions`` option in Model Optimizer -or when a model is imported directly to OpenVINO runtime using the ``read_model`` method. -Python API is also available for runtime model import. +TensorFlow formats, then you should use one of the classes from :doc:`Frontend Extension API `, +the application of which is described below. 
-If you are implementing extensions for new ONNX, PaddlePaddle, TensorFlow Lite or TensorFlow frontends and plan to use the ``--extensions`` -option in Model Optimizer for model conversion, then the extensions should be: - -1. Implemented in C++ only. - -2. Compiled as a separate shared library (see details on how to do this further in this guide). - -Model Optimizer does not support new frontend extensions written in Python API. - -Remaining part of this guide describes application of Frontend Extension API for new frontends. Registering Extensions ###################### @@ -104,7 +92,7 @@ Extensions can be loaded from a code with the ``ov::Core::add_extension`` metho :fragment: [add_extension] -The ``Identity`` is a custom operation class defined in :doc:`Custom Operation Guide `. This is sufficient to enable reading OpenVINO IR which uses the ``Identity`` extension operation emitted by Model Optimizer. In order to load original model directly to the runtime, add a mapping extension: +The ``Identity`` is a custom operation class defined in :doc:`Custom Operation Guide `. This is sufficient to enable reading OpenVINO IR which uses the ``Identity`` extension operation. In order to load original model directly to the runtime, add a mapping extension: .. tab-set:: @@ -133,11 +121,11 @@ Create a Library with Extensions An extension library should be created in the following cases: -* Conversion of a model with custom operations in Model Optimizer. +* Conversion of a model with custom operations in model conversion API * Loading a model with custom operations in a Python application. This applies to both framework model and OpenVINO IR. * Loading models with custom operations in tools that support loading extensions from a library, for example the ``benchmark_app``. -To create an extension library, for example, to load the extensions into Model Optimizer, perform the following: +To create an extension library, perform the following: 1. 
Create an entry point for extension library. OpenVINO provides the ``OPENVINO_CREATE_EXTENSIONS()`` macro, which allows to define an entry point to a library with OpenVINO Extensions. This macro should have a vector of all OpenVINO Extensions as an argument. diff --git a/docs/articles_en/documentation/openvino-extensibility/custom-gpu-operations.rst b/docs/articles_en/documentation/openvino-extensibility/custom-gpu-operations.rst index 92914223ac123c..9717c6c8ac4e33 100644 --- a/docs/articles_en/documentation/openvino-extensibility/custom-gpu-operations.rst +++ b/docs/articles_en/documentation/openvino-extensibility/custom-gpu-operations.rst @@ -40,8 +40,8 @@ There are two options for using the custom operation configuration file: :fragment: [part0] -All OpenVINO samples, except the trivial ``hello_classification``, and most Open -Model Zoo demos feature a dedicated command-line option ``-c`` to load custom kernels. +All OpenVINO samples, except the trivial ``hello_classification``, +feature a dedicated command-line option ``-c`` to load custom kernels. For example, to load custom operations for the classification sample, run the command below: .. code-block:: cpp @@ -49,11 +49,6 @@ For example, to load custom operations for the classification sample, run the co $ ./classification_sample -m /bvlc_alexnet_fp16.xml -i ./validation_set/daily/227x227/apron.bmp -d GPU -c /custom_layer_example.xml -.. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - .. _config-file-format: @@ -393,3 +388,7 @@ execution ends. For more information, refer to the `printf Function `__. +Additional Resources +#################### + +* Models in the OpenVINO IR format published on `Hugging Face `__. 
diff --git a/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst b/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst index 115f149657821c..08b7c6f6b98018 100644 --- a/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst +++ b/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst @@ -14,9 +14,6 @@ Refer to :doc:`Introduction to OpenVINO Extension <../openvino-extensibility>` t understand the entire flow. This API is applicable to new frontends only, which exist for ONNX, TensorFlow Lite, PaddlePaddle, and TensorFlow. -If a different model format is used, follow legacy -:doc:`Model Optimizer Extensions <../legacy-features/transition-legacy-conversion-api/legacy-model-optimizer-extensibility>` -guide. .. note:: diff --git a/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library/advanced-guides/low-precision-transformations.rst b/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library/advanced-guides/low-precision-transformations.rst index 9451fabd6219d8..4b64b2177af361 100644 --- a/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library/advanced-guides/low-precision-transformations.rst +++ b/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library/advanced-guides/low-precision-transformations.rst @@ -312,17 +312,11 @@ This step is optional. It modifies the transformation function to a device-speci Result model overview ##################### -Let's explore quantized `TensorFlow implementation of ResNet-50 `__ model. Use `Model Downloader `__ tool to download the ``fp16`` model from `OpenVINO™ Toolkit - Open Model Zoo repository `__: - -.. code-block:: sh - - omz_downloader --name resnet-50-tf --precisions FP16-INT8 - -After that you should quantize model by the `Model Quantizer `__ tool. - -.. 
code-block:: sh - - omz_quantizer --model_dir public/resnet-50-tf --dataset_dir --precisions=FP16-INT8 +Let's explore the resnet-50-tf model, quantized to ``fp16``, which is a TensorFlow +implementation of `ResNet-50 `__ +- an image classification model pre-trained on the ImageNet dataset. Originally +redistributed in the "Saved model" format, converted to a frozen graph using the +"tf.graph_util" module. Inference @@ -346,7 +340,7 @@ Result model depends on different factors: Information about layer precision is stored in the performance counters that are -available from the OpenVINO Runtime API. For example, the part of performance counters table for quantized `TensorFlow implementation of ResNet-50 `__ model inference on CPU Plugin looks as follows: +available from the OpenVINO Runtime API. For example, the part of performance counters table for the resnet-50-tf model inferred on CPU Plugin looks as follows: .. list-table:: :header-rows: 1 diff --git a/docs/articles_en/documentation/openvino-security.rst b/docs/articles_en/documentation/openvino-security.rst index 255dbbd2b62c35..03a99ba49e89e2 100644 --- a/docs/articles_en/documentation/openvino-security.rst +++ b/docs/articles_en/documentation/openvino-security.rst @@ -84,6 +84,6 @@ Additional Resources #################### - Intel® Distribution of OpenVINO™ toolkit `home page `__. -- :doc:`Convert a Model `. +- :doc:`Convert a Model <../openvino-workflow/model-preparation/convert-model-to-ir>`. - :doc:`OpenVINO™ Runtime User Guide <../openvino-workflow/running-inference>`. 
- For more information on Sample Applications, see the :doc:`OpenVINO Samples Overview <../learn-openvino/openvino-samples>` diff --git a/docs/articles_en/get-started.rst b/docs/articles_en/get-started.rst index 28a39d3c0a4e84..9b46cc416605f3 100644 --- a/docs/articles_en/get-started.rst +++ b/docs/articles_en/get-started.rst @@ -62,14 +62,14 @@ OpenVINO provides a wide array of examples and documentation showing how to work OpenVINO Basics +++++++++++++++ -Learn the basics of working with models and inference in OpenVINO. Begin with “Hello World” Interactive Tutorials that show how to prepare models, run inference, and retrieve results using the OpenVINO API. Then, explore other examples from the Open Model Zoo and OpenVINO Code Samples that can be adapted for your own application. +Learn the basics of working with models and inference in OpenVINO. Begin with “Hello World” Interactive Tutorials that show how to prepare models, run inference, and retrieve results using the OpenVINO API. Then, explore OpenVINO Code Samples that can be adapted for your own application. .. _interactive-learn-openvino/interactive-tutorials-python: Interactive Tutorials - Jupyter Notebooks ----------------------------------------- -Start with :doc:`interactive Python ` that show the basics of model inferencing, the OpenVINO API, how to convert models to OpenVINO format, and more. +Start with :doc:`interactive Python ` that show the basics of model inference, the OpenVINO API, how to convert models to OpenVINO format, and more. 
* `Hello Image Classification `__ - Load an image classification model in OpenVINO and use it to apply a label to an image * `OpenVINO Runtime API Tutorial `__ - Learn the basic Python API for working with models in OpenVINO diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 29547d5b0fc2e5..7603adf37b7e89 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -38,20 +38,7 @@ All currently supported versions are: :doc:`Install OpenVINO GenAI Flavor <../learn-openvino/llm_inference_guide/genai-guide>` and :doc:`Run LLMs with OpenVINO GenAI Flavor <../learn-openvino/llm_inference_guide/genai-guide>`. -.. dropdown:: Deprecation of OpenVINO™ Development Tools Package - - The OpenVINO™ Development Tools package has been deprecated and removed from the default - installation options. For new projects, the OpenVINO runtime package now includes - all necessary components. - - The OpenVINO Development Tools is still available for older versions of OpenVINO, - as well as the current one, from the GitHub repository and PyPI. :doc:`Learn more <../documentation/legacy-features/install-dev-tools>`. - .. dropdown:: Building OpenVINO from Source OpenVINO Toolkit source files are available on GitHub as open source. If you want to build your own version of OpenVINO for your platform, follow the `OpenVINO Build Instructions `__. 
- - - - diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst b/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst index 20965f2f22d095..77b23ca9b2d6a4 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst @@ -277,4 +277,4 @@ Additional Resources * Converting models for use with OpenVINO™: :doc:`Convert a Model <../../../openvino-workflow/model-preparation>` * Writing your own OpenVINO™ applications: :doc:`OpenVINO™ Runtime User Guide <../../../openvino-workflow/running-inference>` * Sample applications: :doc:`OpenVINO™ Toolkit Samples Overview <../../../learn-openvino/openvino-samples>` -* Pre-trained deep learning models: :doc:`Overview of OpenVINO™ Toolkit Pre-Trained Models <../../../documentation/legacy-features/model-zoo>` +* Pre-trained deep learning models on `Hugging Face `__. diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst b/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst index e4bff378106122..b02d7f4f1984fc 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst @@ -190,4 +190,4 @@ Additional Resources * :doc:`Convert models for use with OpenVINO™ <../../../openvino-workflow/model-preparation/convert-model-to-ir>` * :doc:`Write your own OpenVINO™ applications <../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` * Sample applications: :doc:`OpenVINO™ Toolkit Samples Overview <../../../learn-openvino/openvino-samples>` -* Pre-trained deep learning models: :doc:`Overview of OpenVINO™ Toolkit Pre-Trained Models <../../../documentation/legacy-features/model-zoo>` +* Pre-trained deep learning models on `Hugging Face `__ diff --git 
a/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst b/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst index 9db280ec81472e..bdcd89d6b195b1 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst @@ -213,4 +213,4 @@ Additional Resources * :doc:`Convert models for use with OpenVINO™ <../../../openvino-workflow/model-preparation/convert-model-to-ir>` * :doc:`Write your own OpenVINO™ applications <../../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` * Sample applications: :doc:`OpenVINO™ Toolkit Samples Overview <../../../learn-openvino/openvino-samples>` -* Pre-trained deep learning models: :doc:`Overview of OpenVINO™ Toolkit Pre-Trained Models <../../../documentation/legacy-features/model-zoo>` +* Pre-trained deep learning models on `Hugging Face `__. diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-brew.rst b/docs/articles_en/get-started/install-openvino/install-openvino-brew.rst index b1710f3bb358e8..612a873e4ff5ed 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-brew.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-brew.rst @@ -59,14 +59,7 @@ Now that you've installed OpenVINO Runtime, you can try the following things: * Learn more about :doc:`OpenVINO Workflow <../../../openvino-workflow>`. * To prepare your models for working with OpenVINO, see :doc:`Model Preparation <../../../openvino-workflow/model-preparation>`. -* See pre-trained deep learning models in our - :doc:`Open Model Zoo <../../../documentation/legacy-features/model-zoo>`. - - .. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - +* See pre-trained deep learning models on `Hugging Face `__. 
* Learn more about :doc:`Inference with OpenVINO Runtime <../../../openvino-workflow/running-inference>`. * See sample applications in :doc:`OpenVINO toolkit Samples Overview <../../../learn-openvino/openvino-samples>`. * Check out the OpenVINO `product home page `__. diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-conda.rst b/docs/articles_en/get-started/install-openvino/install-openvino-conda.rst index d1392d3f46a513..df3c8c7e0dc53b 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-conda.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-conda.rst @@ -108,7 +108,6 @@ components by using: - ``libopenvino-pytorch-frontend`` - ``libopenvino-tensorflow-frontend`` - ``libopenvino-tensorflow-lite-frontend`` -- ``libopenvino-dev`` - ``libopenvino-python`` - ``libopenvino-arm-cpu-plugin`` diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-vcpkg.rst b/docs/articles_en/get-started/install-openvino/install-openvino-vcpkg.rst index af9fe85528ca5d..6d739b350f5b38 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-vcpkg.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-vcpkg.rst @@ -81,13 +81,7 @@ Now that you've installed OpenVINO Runtime, you can try the following things: * Learn more about :doc:`OpenVINO Workflow <../../../openvino-workflow>`. * To prepare your models for working with OpenVINO, see :doc:`Model Preparation <../../../openvino-workflow/model-preparation>`. -* See pre-trained deep learning models in our :doc:`Open Model Zoo <../../../documentation/legacy-features/model-zoo>`. - - .. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - +* See pre-trained deep learning models on `Hugging Face `__. * Learn more about :doc:`Inference with OpenVINO Runtime <../../../openvino-workflow/running-inference>`. 
* See sample applications in :doc:`OpenVINO toolkit Samples Overview <../../../learn-openvino/openvino-samples>`. * Check out the OpenVINO `product home page `__ . diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-yum.rst b/docs/articles_en/get-started/install-openvino/install-openvino-yum.rst index 970bb47a095d5b..fc413f194a1e63 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-yum.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-yum.rst @@ -190,13 +190,7 @@ You can also try the following things: * Learn more about :doc:`OpenVINO Workflow <../../../openvino-workflow>`. * To prepare your models for working with OpenVINO, see :doc:`Model Preparation <../../../openvino-workflow/model-preparation>`. -* See pre-trained deep learning models in our :doc:`Open Model Zoo <../../../documentation/legacy-features/model-zoo>`. - - .. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - +* See pre-trained deep learning models on `Hugging Face `__. * Learn more about :doc:`Inference with OpenVINO Runtime <../../../openvino-workflow/running-inference>`. * See sample applications in :doc:`OpenVINO toolkit Samples Overview <../../../learn-openvino/openvino-samples>`. * Take a glance at the OpenVINO `product home page `__ . diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-zypper.rst b/docs/articles_en/get-started/install-openvino/install-openvino-zypper.rst index 127b26cac0590f..bc589dfdb48a8b 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-zypper.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-zypper.rst @@ -142,13 +142,7 @@ You can also try the following things: * Learn more about :doc:`OpenVINO Workflow <../../../openvino-workflow>`. 
* To prepare your models for working with OpenVINO, see :doc:`Model Preparation <../../../openvino-workflow/model-preparation>`. -* See pre-trained deep learning models in our :doc:`Open Model Zoo <../../../documentation/legacy-features/model-zoo>`. - - .. important:: - - Due to the deprecation of Open Model Zoo, models in the OpenVINO IR format are now - published on `Hugging Face `__. - +* See pre-trained deep learning models on `Hugging Face `__. * Learn more about :doc:`Inference with OpenVINO Runtime <../../../openvino-workflow/running-inference>`. * See sample applications in :doc:`OpenVINO toolkit Samples Overview <../../../learn-openvino/openvino-samples>`. * Take a glance at the OpenVINO `product home page `__ . diff --git a/docs/articles_en/learn-openvino/interactive-tutorials-python/notebooks-installation.rst b/docs/articles_en/learn-openvino/interactive-tutorials-python/notebooks-installation.rst index eb02caa06852fd..ba7859a0c9f5d1 100644 --- a/docs/articles_en/learn-openvino/interactive-tutorials-python/notebooks-installation.rst +++ b/docs/articles_en/learn-openvino/interactive-tutorials-python/notebooks-installation.rst @@ -312,8 +312,6 @@ Installing notebooks 1. **Create a Virtual Environment** - If you already have installed *openvino-dev*, you may skip this step and proceed with the next one. - .. code-block:: sh python -m venv openvino_env @@ -364,8 +362,6 @@ Installing notebooks 1. **Create a Virtual Environment** - If you already have installed *openvino-dev*, you may skip this step and proceed with the next one. - .. code-block:: sh python3 -m venv openvino_env @@ -415,8 +411,6 @@ Installing notebooks 1. **Create a Virtual Environment** - If you already have installed *openvino-dev*, you may skip this step and proceed with the next one. - .. 
code-block:: sh python3 -m venv openvino_env diff --git a/docs/articles_en/learn-openvino/llm_inference_guide.rst b/docs/articles_en/learn-openvino/llm_inference_guide.rst index e1d643648b4be5..372c3b6d652bfc 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide.rst +++ b/docs/articles_en/learn-openvino/llm_inference_guide.rst @@ -12,7 +12,6 @@ Generative AI workflow Generative Model Preparation Inference with OpenVINO GenAI Inference with Optimum Intel - Generative AI with Base OpenVINO (not recommended) OpenVINO Tokenizers @@ -90,5 +89,6 @@ Proceed to guides on: * :doc:`OpenVINO GenAI Flavor <./llm_inference_guide/genai-guide>` * :doc:`Hugging Face and Optimum Intel <./llm_inference_guide/llm-inference-hf>` +* `Generative AI with Base OpenVINO `__ diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-native-ov.rst b/docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-native-ov.rst deleted file mode 100644 index d33ae05f68f462..00000000000000 --- a/docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-native-ov.rst +++ /dev/null @@ -1,192 +0,0 @@ -Generative AI with Base OpenVINO (not recommended) -=============================================================================================== - -To run Generative AI models using native OpenVINO APIs you need to follow regular -**Convert -> Optimize -> Deploy** path with a few simplifications. - -To convert a model from `Hugging Face `__, you can use -Optimum-Intel export feature that allows you to export model in the OpenVINO format without -invoking conversion API and tools directly. In this case, the conversion process is a bit -more simplified. You can still use a regular conversion path if the model comes from -outside of Hugging Face ecosystem, i.e., in source framework format (PyTorch, etc.) 
- -Model optimization can be performed within Hugging Face or directly using NNCF as described in -:doc:`Weight Compression <../../openvino-workflow/model-optimization-guide/weight-compression>`. - -.. note:: - - It is recommended to use models in 4-bit precision, as maintaining the model in its - original precision may result in significantly decreased performance. - -Inference code that uses native API cannot benefit from Hugging Face pipelines. -You need to write your custom code or take it from the available examples. Below are -some examples of popular Generative AI scenarios: - -* In case of LLMs for text generation, you need to handle tokenization, inference and - token selection loop, and de-tokenization. If token selection involves beam search, - it also needs to be written. -* For image generation models, you need to make a pipeline that includes several model - inferences: inference for source (e.g., text) encoder models, inference loop for - diffusion process and inference for the decoding part. Scheduler code is also required. - -To write such pipelines, you can follow the examples provided as part of OpenVINO: - -* `OpenVINO Latent Consistency Model C++ image generation pipeline `__ -* `OpenVINO Stable Diffusion (with LoRA) C++ image generation pipeline `__ - -To perform inference, models must be first converted to OpenVINO IR format using -Hugging Face Optimum-Intel API. - -An inference pipeline for a text generation LLM is set up in the following stages: - -1. Read and compile the model in OpenVINO IR. -2. Pre-process text prompt with a tokenizer and set the result as model inputs. -3. Run token generation loop. -4. De-tokenize outputs. - -Prerequisites -######################## - -Linux operating system (as of the current version). - -**Installation** - -1. Create a virtual environment - - .. code-block:: python - - python -m venv openvino_llm - - ``openvino_llm`` is an example name; you can choose any name for your environment. - -2. 
Activate the virtual environment - - .. code-block:: python - - source openvino_llm/bin/activate - -3. Install OpenVINO tokenizers and dependencies - - .. code-block:: python - - pip install optimum[openvino] - - -Convert Hugging Face tokenizer and model to OpenVINO IR format -++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -**Convert Tokenizer** - -`OpenVINO Tokenizers `__ -come equipped with a CLI tool that facilitates the conversion of tokenizers -from either the Hugging Face Hub or those saved locally to the OpenVINO IR format: - -.. code-block:: python - - convert_tokenizer microsoft/Llama2-7b-WhoIsHarryPotter --with-detokenizer -o openvino_tokenizer - -In this example, the ``microsoft/Llama2-7b-WhoIsHarryPotter tokenizer`` is transformed from the Hugging -Face hub. You can substitute this tokenizer with one of your preference. You can also rename -the output directory (``openvino_tokenizer``). - -**Convert Model** - -The optimum-cli command can be used for converting a Hugging Face model to the OpenVINO IR model format. -Learn more in Loading an LLM with OpenVINO. - -.. code-block:: python - - optimum-cli export openvino --convert-tokenizer --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 openvino_model - -Full OpenVINO Text Generation Pipeline -###################################################################### - -1. Import and Compile Models -+++++++++++++++++++++++++++++++++++++++ - -Use the model and tokenizer converted from the previous step: - -.. code-block:: python - - import numpy as np - from openvino import compile_model - - # Compile the tokenizer, model, and detokenizer using OpenVINO. These files are XML representations of the models optimized for OpenVINO - compiled_tokenizer = compile_model("openvino_tokenizer.xml") - compiled_model = compile_model("openvino_model.xml") - compiled_detokenizer = compile_model("openvino_detokenizer.xml") - -2. 
Tokenize and Transform Input -+++++++++++++++++++++++++++++++++++++++ - -Tokenization is a mandatory step in the process of generating text using LLMs. Tokenization -converts the input text into a sequence of tokens, which are essentially the format that the -model can understand and process. The input text string must be tokenized and set up in the -structure expected by the model before running inference. - -.. code-block:: python - - text_input = ["Quick brown fox was"] - ov_input = compiled_tokenizer(text_input) - -3. Generate Tokens -+++++++++++++++++++++++++++++++++++++++ - -The core of text generation lies in the inference and token selection loop. In each iteration -of this loop, the model runs inference on the input sequence, generates and selects a new token, -and appends it to the existing sequence. - -.. code-block:: python - - # Define the number of new tokens to generate - new_tokens_size = 10 - - # Determine the size of the existing prompt - prompt_size = ov_input["input_ids"].shape[-1] - - # Prepare the input dictionary for the model - # It combines existing tokens with additional space for new tokens - input_dict = { - output.any_name: np.hstack([tensor, np.zeros(shape=(1, new_tokens_size), dtype=np.int_)]) - for output, tensor in ov_input.items() - } - - # Generate new tokens iteratively - for idx in range(prompt_size, prompt_size + new_tokens_size): - # Get output from the model - output = compiled_model(input_dict)["token_ids"] - # Update the input_ids with newly generated token - input_dict["input_ids"][:, idx] = output[:, idx - 1] - # Update the attention mask to include the new token - input_dict["attention_mask"][:, idx] = 1 - -4. Decode and Display Output -+++++++++++++++++++++++++++++++++++++++ - -The final step in the process is de-tokenization, where the sequence of token IDs generated by -the model is converted back into human-readable text. -This step is essential for interpreting the model's output. - -.. 
code-block:: python - - # Extract token IDs for the final output - ov_token_ids = input_dict["input_ids"] - # Decode the model output back to string - ov_output = compiled_detokenizer(ov_token_ids)["string_output"] - print(f"OpenVINO output string: `{ov_output}`") - -.. code-block:: python - - # Example output: - [' Quick brown fox was walking through the forest. He was looking for something'] - - -Additional Resources -#################### - -* `OpenVINO GenAI Repo `__ -* `OpenVINO Tokenizers `__ -* `Neural Network Compression Framework `__ -* :doc:`Stateful Models Low-Level Details <../../openvino-workflow/running-inference/stateful-models>` -* :doc:`Working with Textual Data <../../openvino-workflow/running-inference/string-tensors>` - diff --git a/docs/articles_en/learn-openvino/openvino-samples/benchmark-tool.rst b/docs/articles_en/learn-openvino/openvino-samples/benchmark-tool.rst index 390fe00605f2c6..8ab8a43031ca39 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/benchmark-tool.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/benchmark-tool.rst @@ -30,7 +30,7 @@ Basic Usage The benchmarking application works with models in the OpenVINO IR (``model.xml`` and ``model.bin``) and ONNX (``model.onnx``) formats. - Make sure to :doc:`convert your models <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` + Make sure to :doc:`convert your models <../../openvino-workflow/model-preparation/convert-model-to-ir>` if necessary. To run benchmarking with default options on a model, use the following command: @@ -56,7 +56,7 @@ Basic Usage The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, - OpenVINO also allows you to :doc:`convert your models <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>`. 
+ OpenVINO also allows you to :doc:`convert your models <../../openvino-workflow/model-preparation/convert-model-to-ir>`. To run benchmarking with default options on a model, use the following command: @@ -937,4 +937,4 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` diff --git a/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst b/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst index 92f6a410219f43..13f18fc3272b34 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst @@ -7,8 +7,7 @@ Bert Benchmark Python Sample This sample demonstrates how to estimate performance of a Bert model using Asynchronous -Inference Request API. Unlike `demos `__ -this sample does not have +Inference Request API. This sample does not have configurable command line arguments. Feel free to modify sample's source code to try out different options. 
@@ -64,5 +63,5 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `Bert Benchmark Python Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst index f8222e495c7387..7a9a7d449d628d 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst @@ -93,11 +93,11 @@ To run the sample, you need to specify a model and an image: to manually rearrange the default channels order in the sample or demo application or reconvert your model using model conversion API with ``reverse_input_channels`` argument specified. For more information about - the argument, refer to **When to Reverse Input Channels** section of - :doc:`Embedding Preprocessing Computation <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes>`. + the argument, refer to the **Color Conversion** section of + :doc:`Preprocessing API <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/preprocessing-api-details>`. - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) - using the :doc:`model conversion API <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>`. 
+ using the :doc:`model conversion API <../../openvino-workflow/model-preparation/convert-model-to-ir>`. - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - The sample supports NCHW model layout only. @@ -257,7 +257,7 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `OpenVINO Runtime C API `__ - `Hello Classification Python Sample on Github `__ - `Hello Classification C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst index 19219070cbfbe2..3d1c069e2c8cb1 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst @@ -95,11 +95,11 @@ the following command, you can convert an ordinary image to an uncompressed NV12 - By default, this sample expects that model input has BGR channels order. If you trained your model to work with RGB order, you need to reconvert your model using model conversion API with ``reverse_input_channels`` argument - specified. For more information about the argument, refer to **When to Reverse - Input Channels** section of :doc:`Embedding Preprocessing Computation <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes>`. + specified. 
For more information about the argument, refer to the + **Color Conversion** section of :doc:`Preprocessing API <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/preprocessing-api-details>`. - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) - using the :doc:`model conversion API <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>`. + using the :doc:`model conversion API <../../openvino-workflow/model-preparation/convert-model-to-ir>`. - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. Example @@ -208,7 +208,7 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `API Reference `__ - `Hello NV12 Input Classification C++ Sample on Github `__ - `Hello NV12 Input Classification C Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst index 23de8eb1979824..0e929bb5ed2701 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst @@ -14,8 +14,8 @@ using the sample, refer to the following requirements: - Models with only one input and output are supported. - The sample accepts any file format supported by ``core.read_model``. -- The sample has been validated with: `person-detection-retail-0013 `__ - models and the NCHW layout format. 
+- The sample has been validated with the person-detection-retail-0013 + model and the NCHW layout format. - To build the sample, use instructions available at :ref:`Build the Sample Applications ` section in "Get Started with Samples" guide. @@ -82,12 +82,12 @@ To run the sample, you need to specify a model and an image: order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using model conversion API with ``reverse_input_channels`` - argument specified. For more information about the argument, refer to - **When to Reverse Input Channels** section of - :doc:`Embedding Preprocessing Computation <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes>`. + argument specified. For more information about the argument, refer to the + **Color Conversion** section of + :doc:`Preprocessing API <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/preprocessing-api-details>`. - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) - using :doc:`model conversion API <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>`. + using :doc:`model conversion API <../../openvino-workflow/model-preparation/convert-model-to-ir>`. - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. 
Example @@ -204,7 +204,7 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `Hello Reshape SSD Python Sample on Github `__ - `Hello Reshape SSD C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst b/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst index b112452e932c72..d88b950463210d 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst @@ -129,9 +129,9 @@ To run the sample, you need to specify a model and an image: .. note:: - - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using model conversion API with ``reverse_input_channels`` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes>`. + - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels order. 
If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using model conversion API with ``reverse_input_channels`` argument specified. For more information about the argument, refer to the **Color Conversion** section of :doc:`Preprocessing API <../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/preprocessing-api-details>`. - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>`. + - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API <../../openvino-workflow/model-preparation/convert-model-to-ir>`. - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. 
@@ -326,6 +326,6 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO™ Toolkit Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `Image Classification Async Python Sample on Github `__ - `Image Classification Async C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst b/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst index e0e3034c225763..ad01cee53a69b1 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst @@ -76,7 +76,7 @@ To run the sample, you need to specify model weights and a device. - This sample supports models with FP32 weights only. - The ``lenet.bin`` weights file is generated by - :doc:`model conversion API <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` + :doc:`model conversion API <../../openvino-workflow/model-preparation/convert-model-to-ir>` from the public LeNet model, with the ``input_shape [64,1,28,28]`` parameter specified. - The original model is available in the `Caffe repository `__ on GitHub. 
@@ -292,6 +292,6 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `Model Creation Python Sample on Github `__ - `Model Creation C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst b/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst index 245672decb7ab2..ccaa1f03a35552 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst @@ -8,15 +8,13 @@ Sync Benchmark Sample This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency -oriented scenarios. Models with static input shapes are supported. Unlike -`demos `__ -this sample does not have other configurable command-line +oriented scenarios. Models with static input shapes are supported. +This sample does not have other configurable command-line arguments. Feel free to modify sample's source code to try out different options. Before using the sample, refer to the following requirements: - The sample accepts any file format supported by ``core.read_model``. -- The sample has been validated with: `yolo-v3-tf `__, - `face-detection-0200 `__ models. +- The sample has been validated with: the yolo-v3-tf and face-detection-0200 models. - To build the sample, use instructions available at :ref:`Build the Sample Applications ` section in "Get Started with Samples" guide. 
@@ -167,6 +165,6 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `Sync Benchmark Python Sample on Github `__ - `Sync Benchmark C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst b/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst index e8b723afd2a480..4632fab82bd0ea 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst @@ -7,7 +7,7 @@ Throughput Benchmark Sample This sample demonstrates how to estimate performance of a model using Asynchronous -Inference Request API in throughput mode. Unlike `demos `__ this sample +Inference Request API in throughput mode. This sample does not have other configurable command-line arguments. Feel free to modify sample's source code to try out different options. @@ -18,8 +18,7 @@ sets ``uint8``, while the sample uses default model precision which is usually ` Before using the sample, refer to the following requirements: - The sample accepts any file format supported by ``core.read_model``. -- The sample has been validated with: `yolo-v3-tf `__, - `face-detection-0200 `__ models. +- The sample has been validated with: yolo-v3-tf and face-detection-0200 models. - To build the sample, use instructions available at :ref:`Build the Sample Applications ` section in "Get Started with Samples" guide. 
@@ -171,6 +170,6 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` -- :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` +- :doc:`Convert a Model <../../openvino-workflow/model-preparation/convert-model-to-ir>` - `Throughput Benchmark Python Sample on Github `__ - `Throughput Benchmark C++ Sample on Github `__ diff --git a/docs/articles_en/openvino-workflow/model-preparation.rst b/docs/articles_en/openvino-workflow/model-preparation.rst index c23540874e9b7a..33a4d8a54cc7f6 100644 --- a/docs/articles_en/openvino-workflow/model-preparation.rst +++ b/docs/articles_en/openvino-workflow/model-preparation.rst @@ -56,12 +56,6 @@ The easiest way to obtain a model is to download it from an online database, suc .. note:: - Model conversion API prior to OpenVINO 2023.1 is considered deprecated. Existing and new - projects are recommended to transition to the new solutions, keeping in mind that they are - not fully backwards compatible with ``openvino.tools.mo.convert_model`` or the ``mo`` - CLI tool. For more details, see the - :doc:`Model Conversion API Transition Guide <../documentation/legacy-features/transition-legacy-conversion-api>`. - For PyTorch and JAX/Flax models, `Python API <#convert-a-model-with-python-convert-model>`__ is the only conversion option. 
@@ -298,15 +292,4 @@ follow: * :doc:`Post-training optimization ` * :doc:`Model inference in OpenVINO Runtime ` -If you are still using the legacy conversion API (``mo`` or ``openvino.tools.mo.convert_model``), -refer to the following materials: - -* :doc:`Transition from legacy mo and ov.tools.mo.convert_model <../documentation/legacy-features/transition-legacy-conversion-api>` -* :doc:`Legacy Model Conversion API <../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` - - - - .. need to investigate python api article generation - api/ie_python_api/_autosummary/openvino.Model.html does not exist, api/ie_python_api/_autosummary/openvino.runtime.Model.html does. - - diff --git a/docs/articles_en/openvino-workflow/model-preparation/convert-model-to-ir.rst b/docs/articles_en/openvino-workflow/model-preparation/convert-model-to-ir.rst index 560b013301e064..dd2fc35c56e92b 100644 --- a/docs/articles_en/openvino-workflow/model-preparation/convert-model-to-ir.rst +++ b/docs/articles_en/openvino-workflow/model-preparation/convert-model-to-ir.rst @@ -296,7 +296,7 @@ used by OpenVINO, typically obtained by converting models of supported framework * The ``convert_model()`` method: - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR can + You can use ``ovc`` to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. .. dropdown:: List of supported formats: @@ -423,7 +423,7 @@ used by OpenVINO, typically obtained by converting models of supported framework * The ``convert_model()`` method: - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR + You can use ``ovc`` to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. .. 
dropdown:: List of supported formats: @@ -557,7 +557,7 @@ used by OpenVINO, typically obtained by converting models of supported framework * The ``convert_model()`` method: - You can use ``mo`` command-line tool to convert a model to IR. The obtained IR + You can use ``ovc`` to convert a model to IR. The obtained IR can then be read by ``read_model()`` and inferred. .. dropdown:: List of supported formats: @@ -708,6 +708,6 @@ multiple times: Additional Resources #################### -* :doc:`Transition guide from the legacy to new conversion API <../../documentation/legacy-features/transition-legacy-conversion-api>` +* Learn about the :doc:`parameters to adjust model conversion <./conversion-parameters>`. * `Download models from Hugging Face `__. diff --git a/docs/articles_en/openvino-workflow/running-inference/dynamic-shapes.rst b/docs/articles_en/openvino-workflow/running-inference/dynamic-shapes.rst index 9de4ba9df18827..b9978f3767562e 100644 --- a/docs/articles_en/openvino-workflow/running-inference/dynamic-shapes.rst +++ b/docs/articles_en/openvino-workflow/running-inference/dynamic-shapes.rst @@ -139,7 +139,7 @@ To check if a model already has dynamic dimensions, first load it with the ``rea If the input model already has dynamic dimensions, that will not change during inference. If the inputs will not be used dynamically, it is recommended to set them to static values using the ``reshape`` method to save application memory and potentially improve inference speed. The OpenVINO API supports any combination of static and dynamic dimensions. -Static and dynamic dimensions can also be set when converting the model with ``convert_model()``. It has identical capabilities to the ``reshape`` method, so you can save time by converting the model with dynamic shapes beforehand rather than in the application code. 
To get information about setting input shapes using ``convert_model()``, refer to :doc:`Setting Input Shapes <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-setting-input-shapes>`. +Static and dynamic dimensions can also be set when converting the model with ``convert_model()``. It has identical capabilities to the ``reshape`` method, so you can save time by converting the model with dynamic shapes beforehand rather than in the application code. To get information about setting input shapes using ``convert_model()``, refer to :doc:`Setting Input Shapes <./changing-input-shape>`. Dimension Bounds ---------------- diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst index aa8e9cdabfda64..31d0af303c633a 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst @@ -31,7 +31,6 @@ different conditions: | :doc:`Automatic Device Selection (AUTO) ` | :doc:`Heterogeneous Execution (HETERO) ` | :doc:`Automatic Batching Execution (Auto-batching) ` -| :doc:`[DEPRECATED] Multi-Device Execution (MULTI) <../../documentation/legacy-features/multi-device>` To learn how to change the device configuration, read the :doc:`Query device properties article `. 
diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection.rst index 6bebf087052b75..a5ab0c845dfa66 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/auto-device-selection.rst @@ -513,7 +513,6 @@ Additional Resources * `Automatic Device Selection with OpenVINO™ Notebook `__ * :doc:`Debugging AUTO ` -* :doc:`(LEGACY) Running on Multiple Devices Simultaneously <../../../documentation/legacy-features/multi-device>` * :doc:`Inference Devices and Modes <../inference-devices-and-modes>` diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst index b4e1c7ac15afcc..2adf3e7f9d1e4d 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst @@ -124,7 +124,7 @@ Selected precision of each primitive depends on the operation precision in IR, q The ``u1``/``u8``/``i8`` data types are used for quantized operations only, which means that they are not selected automatically for non-quantized operations. For more details on how to get a quantized model, refer to the :doc:`Model Optimization guide <../../model-optimization>`. -Floating-point precision of a GPU primitive is selected based on operation precision in the OpenVINO IR, except for the :doc:``, which is executed in the ``f16`` precision. +Floating-point precision of a GPU primitive is selected based on operation precision in the OpenVINO IR, except for the :doc:``, which is executed in the ``f16`` precision. .. 
note:: diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst index 436d383ebf787e..2ba25507802288 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst @@ -250,11 +250,11 @@ or **ov::intel_npu::max_tiles and ov::intel_npu::tiles** -the ``max_tiles`` property is read-write to enable compiling models off-device. +the ``max_tiles`` property is read-write to enable compiling models off-device. When on NPU, ``max_tiles`` will return the number of tiles the device has. Setting the number of tiles to compile for (via ``intel_npu::tiles``), when on device, -must be preceded by reading ``intel_npu::max_tiles`` first, to make sure that -``ov::intel_npu::tiles`` <= ``ov::intel_npu::max_tiles`` +must be preceded by reading ``intel_npu::max_tiles`` first, to make sure that +``ov::intel_npu::tiles`` <= ``ov::intel_npu::max_tiles`` to avoid exceptions from the compiler. .. 
note:: @@ -281,7 +281,3 @@ Additional Resources * `Working with NPUs in OpenVINO™ Notebook `__ * `Vision colorization Notebook <./../../../notebooks/vision-image-colorization-with-output.html>`__ -* `Classification Benchmark C++ Demo `__ -* `3D Human Pose Estimation Python Demo `__ -* `Object Detection C++ Demo `__ -* `Object Detection Python Demo `__ diff --git a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/general-optimizations.rst b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/general-optimizations.rst index b8ec2da9235fd4..5f01623d248755 100644 --- a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/general-optimizations.rst +++ b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/general-optimizations.rst @@ -18,7 +18,7 @@ Inputs Pre-Processing with OpenVINO In many cases, a network expects a pre-processed image. It is advised not to perform any unnecessary steps in the code: -* Model conversion API can efficiently incorporate the mean and normalization (scale) values into a model (for example, to the weights of the first convolution). For more details, see the :doc:`relevant model conversion API command-line parameters <../../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation>`. +* Model conversion API can efficiently incorporate the mean and normalization (scale) values into a model (for example, to the weights of the first convolution). For more details, see the :doc:`relevant model conversion API command-line parameters <../../../openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/preprocessing-api-details>`. 
* Let OpenVINO accelerate other means of :doc:`Image Pre-processing and Conversion ` * Data which is already in the "on-device" memory can be input directly by using the :doc:`remote tensors API of the GPU Plugin <../inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin>`. @@ -60,7 +60,7 @@ Below are example-codes for the regular and async-based approaches to compare: The technique can be generalized to any available parallel slack. For example, you can do inference and simultaneously encode the resulting or previous frames or run further inference, like emotion detection on top of the face detection results. -Refer to the `Object Detection C++ Demo `__ , `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample <../../../learn-openvino/openvino-samples/benchmark-tool>` for complete examples of the Async API in action. +Refer to the :doc:`Benchmark App Sample <../../../learn-openvino/openvino-samples/benchmark-tool>` for complete examples of the Async API in action. .. 
note:: diff --git a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview.rst b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview.rst index 690b606ff3720a..1562165916e576 100644 --- a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview.rst +++ b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimize-preprocessing/layout-api-overview.rst @@ -23,7 +23,6 @@ Below is a list of cases where input/output layout is important: * :doc:`Convert to OpenVINO <../../../model-preparation/convert-model-to-ir>` * `OpenVINO Model Conversion Tutorial `__ - * :doc:`[LEGACY] Model Optimizer Embedding Preprocessing Computation <../../../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api/[legacy]-embedding-preprocessing-computation>` guide. * Improving the readability of a model input and output. diff --git a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-throughput.rst b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-throughput.rst index 18c18c5f7d05b8..8aafd9ceb4faec 100644 --- a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-throughput.rst +++ b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-throughput.rst @@ -63,18 +63,7 @@ In general, most throughput-oriented inference applications should: * Use the Async API with callbacks, to avoid any dependency on the completion order of the requests and possible device starvation, as explained in the :doc:`common-optimizations section `. 
-Multi-Device Execution -###################### - -OpenVINO offers the automatic, scalable :doc:`multi-device inference mode <../../../documentation/legacy-features/multi-device>`, which is a simple *application-transparent* way to improve throughput. There is no need to re-architecture existing applications for any explicit multi-device support: no explicit network loading to each device, no separate per-device queues, no additional logic to balance inference requests between devices, etc. For the application using it, multi-device is like any other device, as it manages all processes internally. -Just like with other throughput-oriented scenarios, there are several major pre-requisites for optimal multi-device performance: - -* Using the :ref:`Asynchronous API ` and :doc:`callbacks <../integrate-openvino-with-your-application/inference-request>` in particular. -* Providing the multi-device (and hence the underlying devices) with enough data to crunch. As the inference requests are naturally independent data pieces, the multi-device performs load-balancing at the "requests" (outermost) level to minimize the scheduling overhead. - -Keep in mind that the resulting performance is usually a fraction of the "ideal" (plain sum) value, when the devices compete for certain resources such as the memory-bandwidth, which is shared between CPU and iGPU. - .. note:: - While the legacy approach of optimizing the parameters of each device separately works, the :doc:`Automatic Device Selection <../inference-devices-and-modes/auto-device-selection>` allow configuring all devices (that are part of the specific multi-device configuration) at once. + The :doc:`Automatic Device Selection <../inference-devices-and-modes/auto-device-selection>` allows configuration of all devices at once. 
diff --git a/docs/dev/build_mac_arm.md b/docs/dev/build_mac_arm.md index 5a1a3698568f95..8b9781e46a5c96 100644 --- a/docs/dev/build_mac_arm.md +++ b/docs/dev/build_mac_arm.md @@ -14,14 +14,14 @@ The software was validated on: - [brew](https://brew.sh) package manager to install additional dependencies. Use [install brew](https://brew.sh) guide to achieve this. - Installation step for python and python libraries varies depending on the host architecture: - - **arm64** Python 3.9 - 3.12 for the OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others): + - **arm64** Python 3.9 - 3.12 for the OpenVINO Runtime Python API: ```sh % # let's have a look what python versions are available in brew % brew search python % # select preferred version of python based on available ones, e.g. 3.11 % brew install python@3.11 ``` - - **x86_64** Select universal2 installer from [Python releases](https://www.python.org/downloads/macos/) download page and install `python-3.X.Y-macos11.pkg` image. This allows to have universal python libraries, build x86_64 OpenVINO Python API and Development tools. + - **x86_64** Select universal2 installer from [Python releases](https://www.python.org/downloads/macos/) download page and install `python-3.X.Y-macos11.pkg` image. This allows you to have universal python libraries of OpenVINO Python API (build x86_64). 
- Clang compiler and other command line tools from Xcode 10.1 or higher: ```sh @@ -35,13 +35,13 @@ The software was validated on: ```sh % brew install tbb pugixml flatbuffers snappy protobuf ``` -- Additional `pip` dependencies to build OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others): +- Additional `pip` dependencies to build OpenVINO Runtime Python API: ```sh % # update pip and setuptools to newer versions % python3 -m pip install -U pip % python3 -m pip install -r /src/bindings/python/requirements.txt ``` - Additional install requirements (after OpenVINO repo clone) in order to build OpenVINO Python API and Development tools as wheel packages: + Additional install requirements (after OpenVINO repo clone) in order to build OpenVINO Python API as wheel packages: ```sh % python3 -m pip install -r /src/bindings/python/wheel/requirements-dev.txt ``` diff --git a/docs/dev/build_mac_intel_cpu.md b/docs/dev/build_mac_intel_cpu.md index f5b70d73709c20..735c8a97a3b3df 100644 --- a/docs/dev/build_mac_intel_cpu.md +++ b/docs/dev/build_mac_intel_cpu.md @@ -12,14 +12,14 @@ The software was validated on: - [brew](https://brew.sh) package manager to install additional dependencies. Use [install brew](https://brew.sh) guide to achieve this. - Installation step for python and python libraries varies depending on the host architecture: - - **x86_64** Python 3.9 - 3.12 for the OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others): + - **x86_64** Python 3.9 - 3.12 for the OpenVINO Runtime Python API: ```sh % # let's have a look what python versions are available in brew % brew search python % # select preferred version of python based on available ones, e.g. 3.11 % brew install python@3.11 ``` - - **arm64** Select universal2 installer from [Python releases](https://www.python.org/downloads/macos/) download page and install `python-3.X.Y-macos11.pkg` image. 
This allows to have universal python libraries, build x86_64 OpenVINO Python API and Development tools. + - **arm64** Select universal2 installer from [Python releases](https://www.python.org/downloads/macos/) download page and install `python-3.X.Y-macos11.pkg` image. This allows to have universal python libraries of OpenVINO Python API (build x86_64) . - [CMake](https://cmake.org/download/) 3.13 or higher and other development tools: ```sh % brew install cmake scons fdupes git-lfs ninja @@ -32,13 +32,13 @@ The software was validated on: ```sh % brew install tbb pugixml flatbuffers snappy protobuf ``` -- Additional `pip` dependencies to build OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others): +- Additional `pip` dependencies to build OpenVINO Runtime Python API: ```sh % # update pip and setuptools to newer versions % python3 -m pip install -U pip % python3 -m pip install -r /src/bindings/python/requirements.txt ``` - Additional install requirements (after OpenVINO repo clone) in order to build OpenVINO Python API and Development tools as wheel packages: + Additional install requirements (after OpenVINO repo clone) in order to build OpenVINO Python API: ```sh % python3 -m pip install -r /src/bindings/python/wheel/requirements-dev.txt ``` diff --git a/docs/dev/installing.md b/docs/dev/installing.md index de4c7ba9df9af6..c20b2ce183de3c 100644 --- a/docs/dev/installing.md +++ b/docs/dev/installing.md @@ -6,200 +6,87 @@ Once the project is built you can install OpenVINO™ Runtime into custom locati cmake --install --prefix ``` -## Installation check +## Build and Run Samples -

    -<summary>For versions prior to 2022.1</summary>
    -

    +1. Build samples. -1. Obtaining Open Model Zoo tools and models + To build C++ sample applications, run the following commands: -To have the ability to run samples and demos, you need to clone the Open Model Zoo repository and copy the folder under `./deployment_tools` to your install directory: + Linux and macOS: + ```sh + cd /samples/cpp + ./build_samples.sh + ``` -``` -git clone https://github.com/openvinotoolkit/open_model_zoo.git -cmake -E copy_directory ./open_model_zoo/ /deployment_tools/open_model_zoo/ -``` - -2. Adding OpenCV to your environment - -Open Model Zoo samples use OpenCV functionality to load images. To use it for demo builds you need to provide the path to your OpenCV custom build by setting `OpenCV_DIR` environment variable and add path OpenCV libraries to the `LD_LIBRARY_PATH (Linux)` or `PATH (Windows)` variable before running demos. - -Linux: -```sh -export LD_LIBRARY_PATH=/path/to/opencv_install/lib/:$LD_LIBRARY_PATH -export OpenCV_DIR=/path/to/opencv_install/cmake -``` - -Windows: -```sh -set PATH=\path\to\opencv_install\bin\;%PATH% -set OpenCV_DIR=\path\to\opencv_install\cmake -``` - -3. 
Running demo - -To check your installation go to the demo directory and run Classification Demo: - -Linux and macOS: -```sh -cd /deployment_tools/demo -./demo_squeezenet_download_convert_run.sh -``` - -Windows: -```sh -cd \deployment_tools\demo -demo_squeezenet_download_convert_run.bat -``` - -Result: -``` -Top 10 results: + Windows Command Prompt: + ```sh + cd \samples\cpp + build_samples_msvc.bat + ``` -Image /deployment_tools/demo/car.png - -classid probability label -------- ----------- ----- -817 0.6853030 sports car, sport car -479 0.1835197 car wheel -511 0.0917197 convertible -436 0.0200694 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon -751 0.0069604 racer, race car, racing car -656 0.0044177 minivan -717 0.0024739 pickup, pickup truck -581 0.0017788 grille, radiator grille -468 0.0013083 cab, hack, taxi, taxicab -661 0.0007443 Model T - -[ INFO ] Execution successful -``` + Windows PowerShell: + ```sh + & /build_samples.ps1 + ``` -

    -
    +2. Download a model. + You can download an image classification model from + [Hugging Face](https://huggingface.co/models?pipeline_tag=image-classification&sort=trending) + to run the sample -
    -<summary>For 2022.1 and after</summary>
    -

    +4. Convert the model. -1. Build samples + Linux and macOS: + ```sh + ovc --compress_to_fp16=True + ``` + Windows: + ```bat + ovc --compress_to_fp16=True + ``` -To build C++ sample applications, run the following commands: +5. Run inference on the sample. -Linux and macOS: -```sh -cd /samples/cpp -./build_samples.sh -``` + Set up the OpenVINO environment variables: -Windows Command Prompt: -```sh -cd \samples\cpp -build_samples_msvc.bat -``` + Linux and macOS: + ```sh + source /setupvars.sh + ``` -Windows PowerShell: -```sh -& /build_samples.ps1 -``` + Windows Command Prompt: + ```bat + \setupvars.bat + ``` -2. Install OpenVINO Development Tools + Windows PowerShell: + ```bat + . /setupvars.ps1 + ``` -> **NOTE**: To build OpenVINO Development Tools (Model Optimizer, Post-Training Optimization Tool, Model Downloader, and Open Model Zoo tools) wheel package locally you are required to use the CMake option: `-DENABLE_WHEEL=ON`. + The following commands run the Image Classification Code Sample using the [`dog.bmp`](https://storage.openvinotoolkit.org/data/test_data/images/ 224x224/dog.bmp) file as an input image, the model in IR format, and on different hardware devices: -To install OpenVINO Development Tools to work with Caffe models (OpenVINO support for Caffe is currently being deprecated and will be removed entirely in the future), execute the following commands: + Linux and macOS: -Linux and macOS: + ```sh + cd ~/openvino_cpp_samples_build//Release + ./classification_sample_async -i /dog.bmp -m /model.xml -d CPU + ``` + where the is the output of ``uname -m``, for example, ``intel64``, ``armhf``, or ``aarch64``. 
-```sh -#setup virtual environment -python3 -m venv openvino_env -source openvino_env/bin/activate -pip install pip --upgrade + Windows: -#install local package from install directory -pip install openvino_dev--py3-none-any.whl[caffe] --find-links=/tools -``` - -Windows: -```bat -rem setup virtual environment -python -m venv openvino_env -openvino_env\Scripts\activate.bat -pip install pip --upgrade - -rem install local package from install directory -cd \tools -pip install openvino_dev--py3-none-any.whl[caffe] --find-links=\tools -``` - -3. Download the Models - -Download the following model to run the Image Classification Sample: - -Linux and macOS: -```sh -omz_downloader --name googlenet-v1 --output_dir ~/models -``` - -Windows: -```bat -omz_downloader --name googlenet-v1 --output_dir %USERPROFILE%\Documents\models -``` - -4. Convert the Model with Model Optimizer - -Linux and macOS: -```sh -mkdir ~/ir -mo --input_model ~/models/public/googlenet-v1/googlenet-v1.caffemodel --compress_to_fp16 --output_dir ~/ir -``` -Windows: -```bat -mkdir %USERPROFILE%\Documents\ir -mo --input_model %USERPROFILE%\Documents\models\public\googlenet-v1\googlenet-v1.caffemodel --compress_to_fp16 --output_dir %USERPROFILE%\Documents\ir -``` - -5. Run Inference on the Sample - -Set up the OpenVINO environment variables: - -Linux and macOS: -```sh -source /setupvars.sh -``` - -Windows Command Prompt: -```bat -\setupvars.bat -``` - -Windows PowerShell: -```bat -. 
/setupvars.ps1 -``` - -The following commands run the Image Classification Code Sample using the [`dog.bmp`](https://storage.openvinotoolkit.org/data/test_data/images/224x224/dog.bmp) file as an input image, the model in IR format from the `ir` directory, and on different hardware devices: - -Linux and macOS: - -```sh -cd ~/openvino_cpp_samples_build//Release -./classification_sample_async -i ~/Downloads/dog.bmp -m ~/ir/googlenet-v1.xml -d CPU -``` -where the is the output of ``uname -m``, for example, ``intel64``, ``armhf``, or ``aarch64``. - -Windows: - -```bat -cd %USERPROFILE%\Documents\Intel\OpenVINO\openvino_cpp_samples_build\\Release -.\classification_sample_async.exe -i %USERPROFILE%\Downloads\dog.bmp -m %USERPROFILE%\Documents\ir\googlenet-v1.xml -d CPU -``` -where the is either ``intel64`` or ``aarch64`` depending on the platform architecture. + ```bat + cd %USERPROFILE%\Documents\Intel\OpenVINO\openvino_cpp_samples_build\\Release + .\classification_sample_async.exe -i \dog.bmp -m \model.xml -d CPU + ``` + where the is either ``intel64`` or ``aarch64`` depending on the platform architecture. When the sample application is complete, you see the label and confidence data for the top 10 categories on the display: +Below are results of using the googlenet-v1 model. + ``` Top 10 results: @@ -220,36 +107,9 @@ classid probability ``` -

    -
    ## Adding OpenVINO Runtime to Your Project -
    -For versions prior to 2022.1 -

    - -For CMake projects, set the `InferenceEngine_DIR` and when you run CMake tool: - -```sh -cmake -DInferenceEngine_DIR=/path/to/openvino/build/ . -``` - -Then you can find Inference Engine by [`find_package`]: - -```cmake -find_package(InferenceEngine REQUIRED) -target_link_libraries(${PROJECT_NAME} PRIVATE ${InferenceEngine_LIBRARIES}) -``` -

    -
    - - -
    -For 2022.1 and after -

    - - For CMake projects, set the `OpenVINO_DIR` and when you run CMake tool: ```sh @@ -266,8 +126,6 @@ target_link_libraries(ov_app PRIVATE openvino::runtime) add_executable(ov_c_app main.c) target_link_libraries(ov_c_app PRIVATE openvino::runtime::c) ``` -

    -
    ## See also
diff --git a/docs/optimization_guide/nncf/code/pruning_tf.py b/docs/optimization_guide/nncf/code/pruning_tf.py
index 4d2f5018961365..76b76174dc7429 100644
--- a/docs/optimization_guide/nncf/code/pruning_tf.py
+++ b/docs/optimization_guide/nncf/code/pruning_tf.py
@@ -40,22 +40,22 @@
 #! [distributed]
 #! [tune_model]
-... # fine-tuning preparations, e.g. dataset, loss, optimizer setup, etc.
+... # fine-tuning preparations, e.g. dataset, loss, optimization setup, etc.
 # create compression callbacks to control pruning parameters and dump compression statistics
-# all the setting are being taked from compression_ctrl, i.e. from NNCF config
+# all the settings are taken from compression_ctrl, i.e. from NNCF config
 compression_callbacks = create_compression_callbacks(compression_ctrl, log_dir="./compression_log")
 # tune quantized model for 50 epochs as the baseline
-model.fit(train_dataset, epochs=50, callbacks=compression_callbacks)
+model.fit(train_dataset, epochs=50, callbacks=compression_callbacks)
 #! [tune_model]
 #! [export]
 compression_ctrl.export_model("compressed_model.pb") #export to Frozen Graph
-#! [export]
+#! [export]
 #! [save_checkpoint]
-from nncf.tensorflow.utils.state import TFCompressionState
+from nncf.tensorflow.utils.state import TFCompressionState
 from nncf.tensorflow.callbacks.checkpoint_callback import CheckpointManagerCallback
 checkpoint = tf.train.Checkpoint(model=model,
diff --git a/docs/optimization_guide/nncf/code/pruning_torch.py b/docs/optimization_guide/nncf/code/pruning_torch.py
index 6bc1cae4319406..6b637881b5cfc9 100644
--- a/docs/optimization_guide/nncf/code/pruning_torch.py
+++ b/docs/optimization_guide/nncf/code/pruning_torch.py
@@ -30,7 +30,7 @@
 #! [nncf_congig]
 #! [wrap_model]
-model = TorchModel() # instance of torch.nn.Module
+model = TorchModel() # instance of torch.nn.Module
 compression_ctrl, model = create_compressed_model(model, nncf_config)
 #! [wrap_model]
@@ -39,7 +39,7 @@
 #! [distributed]
 #! 
[tune_model] -... # fine-tuning preparations, e.g. dataset, loss, optimizer setup, etc. +... # fine-tuning preparations, e.g. dataset, loss, optimization setup, etc. # tune quantized model for 50 epochs as the baseline for epoch in range(0, 50): @@ -52,7 +52,7 @@ #! [export] compression_ctrl.export_model("compressed_model.onnx") -#! [export] +#! [export] #! [save_checkpoint] checkpoint = { @@ -65,8 +65,8 @@ #! [load_checkpoint] resuming_checkpoint = torch.load(path_to_checkpoint) -compression_state = resuming_checkpoint['compression_state'] +compression_state = resuming_checkpoint['compression_state'] compression_ctrl, model = create_compressed_model(model, nncf_config, compression_state=compression_state) -state_dict = resuming_checkpoint['state_dict'] +state_dict = resuming_checkpoint['state_dict'] model.load_state_dict(state_dict) #! [load_checkpoint] diff --git a/docs/optimization_guide/nncf/code/qat_tf.py b/docs/optimization_guide/nncf/code/qat_tf.py index e210b963d5a8f6..d8a20958cfbcc2 100644 --- a/docs/optimization_guide/nncf/code/qat_tf.py +++ b/docs/optimization_guide/nncf/code/qat_tf.py @@ -20,8 +20,8 @@ #! [nncf_congig] #! [wrap_model] -model = KerasModel() # instance of the tensorflow.keras.Model -compression_ctrl, model = create_compressed_model(model, nncf_config) +model = KerasModel() # instance of the tensorflow.keras.Model +compression_ctrl, model = create_compressed_model(model, nncf_config) #! [wrap_model] #! [distributed] @@ -29,7 +29,7 @@ #! [distributed] #! [tune_model] -... # fine-tuning preparations, e.g. dataset, loss, optimizer setup, etc. +... # fine-tuning preparations, e.g. dataset, loss, optimization setup, etc. # create compression callbacks to control optimization parameters and dump compression statistics compression_callbacks = create_compression_callbacks(compression_ctrl, log_dir="./compression_log") @@ -39,10 +39,10 @@ #! [export] compression_ctrl.export_model("compressed_model.pb") #export to Frozen Graph -#! [export] +#! 
[export] #! [save_checkpoint] -from nncf.tensorflow.utils.state import TFCompressionState +from nncf.tensorflow.utils.state import TFCompressionState from nncf.tensorflow.callbacks.checkpoint_callback import CheckpointManagerCallback checkpoint = tf.train.Checkpoint(model=model, diff --git a/docs/optimization_guide/nncf/code/qat_torch.py b/docs/optimization_guide/nncf/code/qat_torch.py index f80a7e8f9aea9f..71594635cb84fd 100644 --- a/docs/optimization_guide/nncf/code/qat_torch.py +++ b/docs/optimization_guide/nncf/code/qat_torch.py @@ -7,7 +7,7 @@ #! [quantize] #! [tune_model] -... # fine-tuning preparations, e.g. dataset, loss, optimizer setup, etc. +... # fine-tuning preparations, e.g. dataset, loss, optimization setup, etc. # tune quantized model for 5 epochs as the baseline for epoch in range(0, 5): diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md index e516c93cf18487..1f9ad9d2c2eb4a 100644 --- a/samples/cpp/benchmark_app/README.md +++ b/samples/cpp/benchmark_app/README.md @@ -12,4 +12,4 @@ To use the C++ benchmark_app, you must first build it following the [Build the S > **NOTE**: If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the [Benchmark Python Tool](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) is available, and you should follow the usage instructions on that page instead. -The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, OpenVINO also allows you to [convert your models](https://docs.openvino.ai/2024/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.html). +The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. 
If you need it, OpenVINO also allows you to [convert your models](https://docs.openvino.ai/2024/documentation/openvino-workflow/model-preparation/convert-model-to-ir.html). diff --git a/src/bindings/python/docs/build.md b/src/bindings/python/docs/build.md index f824d9ccb8d82a..36aecd4350d2d5 100644 --- a/src/bindings/python/docs/build.md +++ b/src/bindings/python/docs/build.md @@ -18,14 +18,14 @@ To learn more about wheels and their use cases, check out the article [What Are OpenVINO can be built based on specific virtual environments such as [venv](https://docs.python.org/3/tutorial/venv.html), [virtualenv](https://virtualenv.pypa.io/en/latest/) or [pyenv](https://github.com/pyenv/pyenv). It is highly recommended to use virtual environments during development. They improve development process and allow better management of Python versions and packages. -*Note: Supported Python versions can be found in ["System Requirements" section](../../../../docs/install_guides/pypi-openvino-dev.md#system-requirements).* +*Note: Supported Python versions can be found in ["System Requirements"](https://docs.openvino.ai/nightly/about-openvino/release-notes-openvino/system-requirements.html).* ### Example: using pyenv with OpenVINO™ on Linux based system 1. First, set up the `pyenv` project. Please follow [official instructions of the pyenv project](https://github.com/pyenv/pyenv#installation) for any additional information. -2. Install a desired Python version. Following example will use Python in version 3.10.7. To correctly link libraries, an installed Python version must match OpenVINO™: +2. Install a desired Python version. Following example will use Python in version 3.10.7. 
To correctly link libraries, an installed Python version must match OpenVINO™: * Python with a shared library for a dynamically linked OpenVINO™: ```shell env PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install --verbose 3.10.7 diff --git a/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md index 2d254557c81e56..fec7f801d308d5 100644 --- a/tools/benchmark_tool/README.md +++ b/tools/benchmark_tool/README.md @@ -11,4 +11,4 @@ For more detailed information on how this sample works, check the dedicated [art The Python benchmark_app is automatically installed when you install OpenVINO Developer Tools using [PyPI](https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html) Before running ``benchmark_app``, make sure the ``openvino_env`` virtual environment is activated, and navigate to the directory where your model is located. The benchmarking application works with models in the OpenVINO IR (``model.xml`` and ``model.bin``) and ONNX (``model.onnx``) formats. -Make sure to [convert your models](https://docs.openvino.ai/2024/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.html) if necessary. +Make sure to [convert your models](https://docs.openvino.ai/2024/openvino-workflow/model-preparation/convert-model-to-ir.html) if necessary. 
From afdb0e62d6fe46f2511d11e2b6a99a84ea4f7de7 Mon Sep 17 00:00:00 2001 From: Daniil Lyakhov Date: Wed, 18 Dec 2024 16:31:13 +0100 Subject: [PATCH 11/60] [Docs][torch.compile] NNCF quantization/compression (#27934) https://openvino-doc.iotg.sclab.intel.com/nncf-docs/openvino-workflow/torch-compile.html#model-quantization-and-weights-compression ### Details: - NNCF quantization / compression is added to `torch.compile` documentation --- .../openvino-workflow/torch-compile.rst | 94 +++++++++++++++---- 1 file changed, 75 insertions(+), 19 deletions(-) diff --git a/docs/articles_en/openvino-workflow/torch-compile.rst b/docs/articles_en/openvino-workflow/torch-compile.rst index e5bc0ca901a5aa..8c6016bfd4742f 100644 --- a/docs/articles_en/openvino-workflow/torch-compile.rst +++ b/docs/articles_en/openvino-workflow/torch-compile.rst @@ -310,10 +310,84 @@ officially. However, it can be accessed by running the following instructions: if sys.version_info >= (3, 11): `raise RuntimeError("Python 3.11+ not yet supported for torch.compile") +TorchServe Integration ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +TorchServe is a performant, flexible, and easy to use tool for serving PyTorch models in production. For more information on the details of TorchServe, +you can refer to `TorchServe github repository. `__. With OpenVINO ``torch.compile`` integration into TorchServe you can serve +PyTorch models in production and accelerate them with OpenVINO on various Intel hardware. Detailed instructions on how to use OpenVINO with TorchServe are +available in `TorchServe examples. `__ and in a `use case app `__. + +Support for Automatic1111 Stable Diffusion WebUI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Automatic1111 Stable Diffusion WebUI is an open-source repository that hosts a browser-based interface for the Stable Diffusion +based image generation. It allows users to create realistic and creative images from text prompts. 
+Stable Diffusion WebUI is supported on Intel CPUs, Intel integrated GPUs, and Intel discrete GPUs by leveraging OpenVINO +``torch.compile`` capability. Detailed instructions are available in +`Stable Diffusion WebUI repository. `__ + + +Model Quantization and Weights Compression +############################################# + +Model quantization and weights compression are effective methods for accelerating model inference and reducing memory consumption, with minimal impact on model accuracy. The `torch.compile` OpenVINO backend supports two key model optimization APIs: + +1. Neural Network Compression Framework (`NNCF `__). NNCF offers advanced algorithms for post-training quantization and weights compression in the OpenVINO toolkit. + +2. PyTorch 2 export quantization. A general-purpose API designed for quantizing models captured by ``torch.export``. + +NNCF is the recommended approach for model quantization and weights compression. NNCF specifically optimizes models for the OpenVINO backend, providing optimal results in terms of inference speed and accuracy. + + +NNCF Model Optimization Support (Preview) ++++++++++++++++++++++++++++++++++++++++++++++ + +The Neural Network Compression Framework (`NNCF `__) implements advanced quantization and weights compression algorithms, which can be applied to ``torch.fx.GraphModule`` to speed up inference +and decrease memory consumption. + +Model quantization example: + +.. code-block:: python + + import nncf + import openvino.torch + import torch + + calibration_loader = torch.utils.data.DataLoader(...) + + def transform_fn(data_item): + images, _ = data_item + return images + + # Model quantization + quantized_model = nncf.quantize(model, calibration_dataset) + + quantized_model = torch.compile(quantized_model, backend="openvino") + +Model weights compression example: + +.. 
code-block:: python
+
+   import nncf
+   import openvino.torch
+   import torch
+
+   # Weights compression
+   compressed_model = nncf.compress_weights(model)
+
+   compressed_model = torch.compile(compressed_model, backend="openvino")
+
+NNCF unlocks the full potential of low-precision OpenVINO kernels due to the placement of quantizers designed specifically for the OpenVINO backend.
+Advanced algorithms like ``SmoothQuant`` or ``BiasCorrection`` allow further metrics improvement while minimizing the output discrepancies between the original and compressed models.
+For further details, please see the `documentation `__
+and a `tutorial `__.
+
 Support for PyTorch 2 export quantization (Preview)
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-PyTorch 2 export quantization is supported by OpenVINO backend in ``torch.compile``. To be able
+NNCF is the default way to compress models for the OpenVINO backend, however
+PyTorch 2 export quantization is supported by OpenVINO backend in ``torch.compile`` as well. To be able
 to access this feature, follow the steps provided in
 `PyTorch 2 Export Post Training Quantization with X86 Backend through Inductor `__
 and update the provided sample as explained below.
@@ -347,24 +421,6 @@
     optimized_model = torch.compile(converted_model, backend="openvino")
-TorchServe Integration
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-TorchServe is a performant, flexible, and easy to use tool for serving PyTorch models in production. For more information on the details of TorchServe,
-you can refer to `TorchServe github repository. `__. With OpenVINO ``torch.compile`` integration into TorchServe you can serve
-PyTorch models in production and accelerate them with OpenVINO on various Intel hardware. Detailed instructions on how to use OpenVINO with TorchServe are
-available in `TorchServe examples. 
`__
-
-Support for Automatic1111 Stable Diffusion WebUI
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-Automatic1111 Stable Diffusion WebUI is an open-source repository that hosts a browser-based interface for the Stable Diffusion
-based image generation. It allows users to create realistic and creative images from text prompts.
-Stable Diffusion WebUI is supported on Intel CPUs, Intel integrated GPUs, and Intel discrete GPUs by leveraging OpenVINO
-``torch.compile`` capability. Detailed instructions are available in
-`Stable Diffusion WebUI repository. `__
-
-
 Architecture
 #################

From 830155476f8e4a4355fd33f54ef420d19f06e53d Mon Sep 17 00:00:00 2001
From: Steve Yoo
Date: Thu, 19 Dec 2024 12:48:05 +0900
Subject: [PATCH 12/60] [GPU] Set fc format to bfyx when its spatial pitches are not one (#27751)

### Details:
- *Set fc format to bfyx when its spatial pitches are not one to avoid selecting the fb_io_block kernel*

### Tickets:
- *155068*

---
 .../intel_gpu/src/graph/fully_connected.cpp | 4 +++-
 .../single_layer_tests/mat_mul.cpp | 15 +++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp
index 308d9a9f2fd66b..2aee524ac2e3e1 100644
--- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp
+++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp
@@ -84,7 +84,9 @@ format::type get_preferred_format(fully_connected_node const& node, const kernel
         // "is_batch_after_spatial" should return true)
         if (data_type_traits::is_floating_point(input_layout.data_type) &&
             input_layout.format == format::bfyx &&
-            input_layout.batch() > 1)
+            input_layout.batch() > 1 &&
+            input_pitches[2] == 1 &&
+            input_pitches[3] == 1)
             return format::yxfb;
 
         return format::bfyx;
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp index 8567cafcdad8e6..58e54f8eee279d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp @@ -148,4 +148,19 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMul_fc_fb_io_block_f16, GPUMatMulLayerTest, ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), MatMulLayerTest::getTestCaseName); + +std::vector> fc4d_shapeRelatedParams = { + { {16, 16, 16, 576}, {576, 1728} }, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatMul_fc4d, GPUMatMulLayerTest, + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(fc4d_shapeRelatedParams)), + ::testing::Values(std::make_pair(false, false)), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(fc_f16_secondaryInputTypes), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(additional_config)), + MatMulLayerTest::getTestCaseName); + } // namespace From f583af7a4790f502d5774281c280bc189305a7f4 Mon Sep 17 00:00:00 2001 From: Wiktor Kobiela Date: Thu, 19 Dec 2024 09:35:33 +0100 Subject: [PATCH 13/60] Revert "support offline CPU in Linux (#27870)" (#28131) This reverts commit 7c34fbdfc2580ad66d97a34a5c68e02130bf6cd8. 
https://jira.devtools.intel.com/browse/CVS-159641 ### Details: - To fix LTO on Ubuntu 20.04 --- src/inference/src/os/lin/lin_system_conf.cpp | 363 ++++++++---------- .../cpu_map_parser/cache_parser_linux.cpp | 245 ------------ .../unit/cpu_map_parser/freq_parser_linux.cpp | 183 --------- 3 files changed, 150 insertions(+), 641 deletions(-) diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 48d486d2ed2d1b..f8bd16173b8fce 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -23,108 +23,76 @@ CPU::CPU() { std::vector> system_info_table; std::vector node_info_table; - constexpr int cache_info_mode = 1; - constexpr int freq_info_mode = 2; - - auto get_info_linux = [&](int mode) { + auto get_cache_info_linux = [&]() { int cpu_index = 0; int cache_index = 0; int cache_files = 3; - std::string one_info; + std::vector one_info(cache_files); - std::vector file_name = {"/topology/core_cpus_list", - "/topology/physical_package_id", - "/cpufreq/cpuinfo_max_freq"}; - int num_of_files = file_name.size(); - - std::string::size_type pos = 0; - std::string::size_type endpos = 0; - std::string sub_str; - - int core_1; - int core_2; - - system_info_table.clear(); - - std::ifstream possible_file("/sys/devices/system/cpu/possible"); - std::string possible_info; + while (1) { + for (int n = 0; n < cache_files; n++) { + cache_index = (n == 0) ? 
n : n + 1; + + std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + "/cache/index" + + std::to_string(cache_index) + "/shared_cpu_list"); + if (!cache_file.is_open()) { + cache_index = -1; + break; + } + std::string cache_info; + std::getline(cache_file, cache_info); + one_info[n] = std::move(cache_info); + } - if (possible_file.is_open()) { - std::getline(possible_file, possible_info); - } else { - return -1; + if (cache_index == -1) { + if (cpu_index == 0) { + return -1; + } else { + return 0; + } + } else { + system_info_table.push_back(one_info); + cpu_index++; + } } - if ((endpos = possible_info.find('-', pos)) != std::string::npos) { - sub_str = possible_info.substr(pos, endpos - pos); - core_1 = std::stoi(sub_str); - sub_str = possible_info.substr(endpos + 1); - core_2 = std::stoi(sub_str); - system_info_table.resize(core_2 + 1, std::vector(cache_files, "")); - } else { - return -1; - } + return 0; + }; - std::ifstream online_file("/sys/devices/system/cpu/online"); - std::string online_info; + auto get_freq_info_linux = [&]() { + int cpu_index = 0; + int cache_index = 0; - if (online_file.is_open()) { - std::getline(online_file, online_info); - } else { - system_info_table.clear(); - return -1; - } + std::vector file_name = {"/topology/core_cpus_list", + "/topology/physical_package_id", + "/cpufreq/cpuinfo_max_freq"}; + int num_of_files = file_name.size(); + std::vector one_info(num_of_files); while (1) { - if ((endpos = online_info.find('-', pos)) != std::string::npos) { - sub_str = online_info.substr(pos, endpos - pos); - core_1 = std::stoi(sub_str); - sub_str = online_info.substr(endpos + 1); - core_2 = std::stoi(sub_str); + for (int n = 0; n < num_of_files; n++) { + cache_index = n; - for (cpu_index = core_1; cpu_index <= core_2; cpu_index++) { - if (mode == cache_info_mode) { - for (int n = 0; n < cache_files; n++) { - cache_index = (n == 0) ? 
n : n + 1; - one_info.clear(); - - std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + - "/cache/index" + std::to_string(cache_index) + "/shared_cpu_list"); - if (cache_file.is_open()) { - std::getline(cache_file, one_info); - } else { - if ((cpu_index == core_1) && (n == 0)) { - system_info_table.clear(); - return -1; - } - } - system_info_table[cpu_index][n] = std::move(one_info); - } - } else { - for (int n = 0; n < num_of_files; n++) { - one_info.clear(); - - std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + - file_name[n]); - if (cache_file.is_open()) { - std::getline(cache_file, one_info); - } else { - if ((cpu_index == core_1) && (n == 2)) { - system_info_table.clear(); - return -1; - } - } - system_info_table[cpu_index][n] = std::move(one_info); - } - } + std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + file_name[n]); + if (!cache_file.is_open()) { + cache_index = -1; + break; } + std::string cache_info; + std::getline(cache_file, cache_info); + one_info[n] = std::move(cache_info); } - if ((pos = online_info.find(',', endpos)) != std::string::npos) { - pos++; + if (cache_index == -1) { + if (cpu_index == 0) { + return -1; + } else { + return 0; + } } else { - break; + system_info_table.push_back(one_info); + cpu_index++; } } @@ -233,7 +201,7 @@ CPU::CPU() { get_node_info_linux(); - if (!get_info_linux(cache_info_mode)) { + if (!get_cache_info_linux()) { parse_cache_info_linux(system_info_table, node_info_table, _processors, @@ -247,7 +215,7 @@ CPU::CPU() { if ((_proc_type_table.size() == 0) || ((_proc_type_table[0][MAIN_CORE_PROC] == 0) && (_proc_type_table[0][ALL_PROC] > 0) && (_proc_type_table[0][ALL_PROC] != _proc_type_table[0][EFFICIENT_CORE_PROC]))) { - if (!get_info_linux(freq_info_mode)) { + if (!get_freq_info_linux()) { parse_freq_info_linux(system_info_table, node_info_table, _processors, @@ -503,73 +471,56 @@ void parse_cache_info_linux(const 
std::vector> system_i const std::vector line_value_0({0, 0, 0, 0, -1, -1}); - std::vector offline_list; - int info_index = 0; - for (int n = 0; n < _processors; n++) { - if ((system_info_table[n][2].size() > 0) || (system_info_table[n][1].size() > 0)) { - info_index = system_info_table[n][2].size() > 0 ? 2 : 1; - if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { - std::string::size_type pos = 0; - std::string::size_type endpos = 0; - std::string sub_str; - - int core_1; - int core_2; - - if (0 == _sockets) { - _proc_type_table.push_back(line_value_0); - } else { - _proc_type_table.push_back(_proc_type_table[0]); - _proc_type_table[0] = line_value_0; - } + if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { + std::string::size_type pos = 0; + std::string::size_type endpos = 0; + std::string sub_str; - while (1) { - if ((endpos = system_info_table[n][info_index].find('-', pos)) != std::string::npos) { - sub_str = system_info_table[n][info_index].substr(pos, endpos - pos); - core_1 = std::stoi(sub_str); - sub_str = system_info_table[n][info_index].substr(endpos + 1); - core_2 = std::stoi(sub_str); - - if ((info_index == 1) && (core_2 - core_1 == 1)) { - offline_list.push_back(n); - break; - } - for (int m = core_1; m <= core_2; m++) { - _cpu_mapping_table[m][CPU_MAP_SOCKET_ID] = _sockets; - _cpu_mapping_table[m][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[m][CPU_MAP_SOCKET_ID]; - update_proc_map_info(m); - if (_processors == 0) { - return; - }; - } - } else if (pos != std::string::npos) { - sub_str = system_info_table[n][info_index].substr(pos); - core_1 = std::stoi(sub_str); - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = _sockets; - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - update_proc_map_info(core_1); + int core_1; + int core_2; + + if (0 == _sockets) { + _proc_type_table.push_back(line_value_0); + } else { + _proc_type_table.push_back(_proc_type_table[0]); + _proc_type_table[0] = line_value_0; + } 
+ + while (1) { + if ((endpos = system_info_table[n][2].find('-', pos)) != std::string::npos) { + sub_str = system_info_table[n][2].substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = system_info_table[n][2].substr(endpos + 1); + core_2 = std::stoi(sub_str); + + for (int m = core_1; m <= core_2; m++) { + _cpu_mapping_table[m][CPU_MAP_SOCKET_ID] = _sockets; + _cpu_mapping_table[m][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[m][CPU_MAP_SOCKET_ID]; + update_proc_map_info(m); if (_processors == 0) { return; }; - endpos = pos; - } - - if ((pos = system_info_table[n][2].find(',', endpos)) != std::string::npos) { - pos++; - } else { - break; } + } else if (pos != std::string::npos) { + sub_str = system_info_table[n][2].substr(pos); + core_1 = std::stoi(sub_str); + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = _sockets; + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + update_proc_map_info(core_1); + if (_processors == 0) { + return; + }; + endpos = pos; } - _sockets++; - if (_proc_type_table[0][ALL_PROC] == 0) { - _proc_type_table.erase(_proc_type_table.begin()); - _sockets--; + + if ((pos = system_info_table[n][2].find(',', endpos)) != std::string::npos) { + pos++; + } else { + break; } } - } else { - offline_list.push_back(n); + _sockets++; } } @@ -589,11 +540,6 @@ void parse_cache_info_linux(const std::vector> system_i _numa_nodes = node_info_table.size(); parse_node_info_linux(node_info_table, _numa_nodes, _sockets, _proc_type_table, _cpu_mapping_table); } - - for (size_t n = 0; n < offline_list.size(); n++) { - _cpu_mapping_table.erase(_cpu_mapping_table.begin() + offline_list[n] - n); - _processors--; - } }; void get_cpu_mapping_from_cores(const int _processors, @@ -669,6 +615,7 @@ void parse_freq_info_linux(const std::vector> system_in std::vector>& _cpu_mapping_table) { int freq_max = 0; bool ecore_enabled = false; + bool ht_enabled = false; _processors = system_info_table.size(); _numa_nodes = 
0; @@ -678,8 +625,6 @@ void parse_freq_info_linux(const std::vector> system_in std::vector line_value_0(PROC_TYPE_TABLE_SIZE, 0); - std::vector offline_list; - auto clean_up_output = [&]() { _processors = 0; _cores = 0; @@ -691,68 +636,65 @@ void parse_freq_info_linux(const std::vector> system_in }; for (int n = 0; n < _processors; n++) { - if (system_info_table[n][2].size() > 0) { - if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { - std::string::size_type pos = 0; - std::string::size_type endpos1 = 0; - std::string::size_type endpos2 = 0; - std::string sub_str; - - int core_1 = 0; - int core_2 = 0; - - if (((endpos1 = system_info_table[n][0].find(',', pos)) != std::string::npos) || - ((endpos2 = system_info_table[n][0].find('-', pos)) != std::string::npos)) { - endpos1 = (endpos1 != std::string::npos) ? endpos1 : endpos2; - sub_str = system_info_table[n][0].substr(pos, endpos1 - pos); - core_1 = std::stoi(sub_str); - sub_str = system_info_table[n][0].substr(endpos1 + 1); - core_2 = std::stoi(sub_str); - if ((core_1 != n) && (core_2 != n)) { - clean_up_output(); - return; - } + if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { + std::string::size_type pos = 0; + std::string::size_type endpos1 = 0; + std::string::size_type endpos2 = 0; + std::string sub_str; - _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = HYPER_THREADING_PROC; - _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; - - _cpu_mapping_table[core_2][CPU_MAP_PROCESSOR_ID] = core_2; - _cpu_mapping_table[core_2][CPU_MAP_SOCKET_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_2][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - 
_cpu_mapping_table[core_2][CPU_MAP_CORE_ID] = _cpu_mapping_table[core_1][CPU_MAP_CORE_ID]; - _cpu_mapping_table[core_2][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; - _cpu_mapping_table[core_2][CPU_MAP_GROUP_ID] = _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID]; - - int core_freq = std::stoi(system_info_table[core_1][2]); - freq_max = std::max(core_freq, freq_max); - } else if (system_info_table[n][0].size() > 0) { - core_1 = std::stoi(system_info_table[n][0]); + int core_1 = 0; + int core_2 = 0; - _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + if (((endpos1 = system_info_table[n][0].find(',', pos)) != std::string::npos) || + ((endpos2 = system_info_table[n][0].find('-', pos)) != std::string::npos)) { + endpos1 = (endpos1 != std::string::npos) ? 
endpos1 : endpos2; + sub_str = system_info_table[n][0].substr(pos, endpos1 - pos); + core_1 = std::stoi(sub_str); + sub_str = system_info_table[n][0].substr(endpos1 + 1); + core_2 = std::stoi(sub_str); + if ((core_1 != n) && (core_2 != n)) { + clean_up_output(); + return; + } - int core_freq = std::stoi(system_info_table[core_1][2]); - if ((0 == freq_max) || (core_freq >= freq_max * 0.97)) { - freq_max = std::max(core_freq, freq_max); - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; - } else { - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = EFFICIENT_CORE_PROC; - ecore_enabled = true; - } + _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = HYPER_THREADING_PROC; + _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + + _cpu_mapping_table[core_2][CPU_MAP_PROCESSOR_ID] = core_2; + _cpu_mapping_table[core_2][CPU_MAP_SOCKET_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_2][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_2][CPU_MAP_CORE_ID] = _cpu_mapping_table[core_1][CPU_MAP_CORE_ID]; + _cpu_mapping_table[core_2][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; + _cpu_mapping_table[core_2][CPU_MAP_GROUP_ID] = _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID]; + + ht_enabled = true; + int core_freq = std::stoi(system_info_table[core_1][2]); + freq_max = std::max(core_freq, freq_max); + } else if (system_info_table[n][0].size() > 0) { + core_1 = std::stoi(system_info_table[n][0]); - _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); 
+ _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + + int core_freq = std::stoi(system_info_table[core_1][2]); + if (((0 == freq_max) || (core_freq >= freq_max * 0.95)) && (!ht_enabled)) { + freq_max = std::max(core_freq, freq_max); + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; + } else { + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = EFFICIENT_CORE_PROC; + ecore_enabled = true; } - _sockets = std::max(_sockets, _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]); - _cores++; + + _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; } - } else { - offline_list.push_back(n); + _sockets = std::max(_sockets, _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]); + _cores++; } } @@ -791,11 +733,6 @@ void parse_freq_info_linux(const std::vector> system_in _numa_nodes = node_info_table.size(); parse_node_info_linux(node_info_table, _numa_nodes, _sockets, _proc_type_table, _cpu_mapping_table); } - - for (size_t n = 0; n < offline_list.size(); n++) { - _cpu_mapping_table.erase(_cpu_mapping_table.begin() + offline_list[n] - n); - _processors--; - } }; void update_valid_processor_linux(const std::vector phy_core_list, diff --git a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp index 9ea43bd0604296..8679090b9ae491 100644 --- a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp @@ -385,188 +385,6 @@ LinuxCpuMapTestCase cache_1sockets_96cores = { {"0-95"}, }, }; -LinuxCpuMapTestCase cache_2sockets_56cores_hyperthreading = { - 110, - 2, - 2, - 56, - {{110, 56, 0, 54, -1, -1}, {54, 28, 0, 26, 0, 0}, {56, 28, 0, 28, 1, 1}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, - {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, - {4, 
0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, - {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, - {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, - {11, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {12, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, - {13, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {14, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, - {15, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {16, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, - {17, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {18, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, - {19, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {21, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, - {22, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {23, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, - {24, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {25, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, - {26, 0, 0, 24, HYPER_THREADING_PROC, 24, -1}, {27, 0, 0, 25, HYPER_THREADING_PROC, 25, -1}, - {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, - {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, - {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, - {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, - {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, - {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, - {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, - {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, - {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, - {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, - {48, 1, 1, 48, HYPER_THREADING_PROC, 48, -1}, 
{49, 1, 1, 49, HYPER_THREADING_PROC, 49, -1}, - {50, 1, 1, 50, HYPER_THREADING_PROC, 50, -1}, {51, 1, 1, 51, HYPER_THREADING_PROC, 51, -1}, - {52, 1, 1, 52, HYPER_THREADING_PROC, 52, -1}, {53, 1, 1, 53, HYPER_THREADING_PROC, 53, -1}, - {54, 1, 1, 54, HYPER_THREADING_PROC, 54, -1}, {55, 1, 1, 55, HYPER_THREADING_PROC, 55, -1}, - {56, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {57, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, - {58, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {59, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, - {60, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {61, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, - {62, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {63, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, - {64, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {65, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, - {66, 0, 0, 26, MAIN_CORE_PROC, 26, -1}, {67, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, - {68, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, {69, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, - {70, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, {71, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, - {72, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, {73, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, - {74, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, {75, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, - {76, 0, 0, 27, MAIN_CORE_PROC, 27, -1}, {77, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, - {78, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {79, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, - {80, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {81, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, - {82, 0, 0, 24, MAIN_CORE_PROC, 24, -1}, {83, 0, 0, 25, MAIN_CORE_PROC, 25, -1}, - {84, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {85, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, - {86, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {87, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, - {88, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {89, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, - {90, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {91, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, - {92, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {93, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, - {94, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {95, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, - {96, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {97, 1, 
1, 41, MAIN_CORE_PROC, 41, -1}, - {98, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {99, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, - {100, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {101, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, - {102, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {103, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, - {104, 1, 1, 48, MAIN_CORE_PROC, 48, -1}, {105, 1, 1, 49, MAIN_CORE_PROC, 49, -1}, - {106, 1, 1, 50, MAIN_CORE_PROC, 50, -1}, {107, 1, 1, 51, MAIN_CORE_PROC, 51, -1}, - {108, 1, 1, 52, MAIN_CORE_PROC, 52, -1}, {109, 1, 1, 53, MAIN_CORE_PROC, 53, -1}, - {110, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {111, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, - }, - { - {"0,56", "0,56", "0-9,11-19,21-27,56-83"}, - {"1,57", "1,57", "0-9,11-19,21-27,56-83"}, - {"2,58", "2,58", "0-9,11-19,21-27,56-83"}, - {"3,59", "3,59", "0-9,11-19,21-27,56-83"}, - {"4,60", "4,60", "0-9,11-19,21-27,56-83"}, - {"5,61", "5,61", "0-9,11-19,21-27,56-83"}, - {"6,62", "6,62", "0-9,11-19,21-27,56-83"}, - {"7,63", "7,63", "0-9,11-19,21-27,56-83"}, - {"8,64", "8,64", "0-9,11-19,21-27,56-83"}, - {"9,65", "9,65", "0-9,11-19,21-27,56-83"}, - {"", "", ""}, - {"11,67", "11,67", "0-9,11-19,21-27,56-83"}, - {"12,68", "12,68", "0-9,11-19,21-27,56-83"}, - {"13,69", "13,69", "0-9,11-19,21-27,56-83"}, - {"14,70", "14,70", "0-9,11-19,21-27,56-83"}, - {"15,71", "15,71", "0-9,11-19,21-27,56-83"}, - {"16,72", "16,72", "0-9,11-19,21-27,56-83"}, - {"17,73", "17,73", "0-9,11-19,21-27,56-83"}, - {"18,74", "18,74", "0-9,11-19,21-27,56-83"}, - {"19,75", "19,75", "0-9,11-19,21-27,56-83"}, - {"", "", ""}, - {"21,77", "21,77", "0-9,11-19,21-27,56-83"}, - {"22,78", "22,78", "0-9,11-19,21-27,56-83"}, - {"23,79", "23,79", "0-9,11-19,21-27,56-83"}, - {"24,80", "24,80", "0-9,11-19,21-27,56-83"}, - {"25,81", "25,81", "0-9,11-19,21-27,56-83"}, - {"26,82", "26,82", "0-9,11-19,21-27,56-83"}, - {"27,83", "27,83", "0-9,11-19,21-27,56-83"}, - {"28,84", "28,84", "28-55,84-111"}, - {"29,85", "29,85", "28-55,84-111"}, - {"30,86", "30,86", "28-55,84-111"}, - {"31,87", "31,87", 
"28-55,84-111"}, - {"32,88", "32,88", "28-55,84-111"}, - {"33,89", "33,89", "28-55,84-111"}, - {"34,90", "34,90", "28-55,84-111"}, - {"35,91", "35,91", "28-55,84-111"}, - {"36,92", "36,92", "28-55,84-111"}, - {"37,93", "37,93", "28-55,84-111"}, - {"38,94", "38,94", "28-55,84-111"}, - {"39,95", "39,95", "28-55,84-111"}, - {"40,96", "40,96", "28-55,84-111"}, - {"41,97", "41,97", "28-55,84-111"}, - {"42,98", "42,98", "28-55,84-111"}, - {"43,99", "43,99", "28-55,84-111"}, - {"44,100", "44,100", "28-55,84-111"}, - {"45,101", "45,101", "28-55,84-111"}, - {"46,102", "46,102", "28-55,84-111"}, - {"47,103", "47,103", "28-55,84-111"}, - {"48,104", "48,104", "28-55,84-111"}, - {"49,105", "49,105", "28-55,84-111"}, - {"50,106", "50,106", "28-55,84-111"}, - {"51,107", "51,107", "28-55,84-111"}, - {"52,108", "52,108", "28-55,84-111"}, - {"53,109", "53,109", "28-55,84-111"}, - {"54,110", "54,110", "28-55,84-111"}, - {"55,111", "55,111", "28-55,84-111"}, - {"0,56", "0,56", "0-9,11-19,21-27,56-83"}, - {"1,57", "1,57", "0-9,11-19,21-27,56-83"}, - {"2,58", "2,58", "0-9,11-19,21-27,56-83"}, - {"3,59", "3,59", "0-9,11-19,21-27,56-83"}, - {"4,60", "4,60", "0-9,11-19,21-27,56-83"}, - {"5,61", "5,61", "0-9,11-19,21-27,56-83"}, - {"6,62", "6,62", "0-9,11-19,21-27,56-83"}, - {"7,63", "7,63", "0-9,11-19,21-27,56-83"}, - {"8,64", "8,64", "0-9,11-19,21-27,56-83"}, - {"9,65", "9,65", "0-9,11-19,21-27,56-83"}, - {"66", "66", "0-9,11-19,21-27,56-83"}, - {"11,67", "11,67", "0-9,11-19,21-27,56-83"}, - {"12,68", "12,68", "0-9,11-19,21-27,56-83"}, - {"13,69", "13,69", "0-9,11-19,21-27,56-83"}, - {"14,70", "14,70", "0-9,11-19,21-27,56-83"}, - {"15,71", "15,71", "0-9,11-19,21-27,56-83"}, - {"16,72", "16,72", "0-9,11-19,21-27,56-83"}, - {"17,73", "17,73", "0-9,11-19,21-27,56-83"}, - {"18,74", "18,74", "0-9,11-19,21-27,56-83"}, - {"19,75", "19,75", "0-9,11-19,21-27,56-83"}, - {"76", "76", "0-9,11-19,21-27,56-83"}, - {"21,77", "21,77", "0-9,11-19,21-27,56-83"}, - {"22,78", "22,78", 
"0-9,11-19,21-27,56-83"}, - {"23,79", "23,79", "0-9,11-19,21-27,56-83"}, - {"24,80", "24,80", "0-9,11-19,21-27,56-83"}, - {"25,81", "25,81", "0-9,11-19,21-27,56-83"}, - {"26,82", "26,82", "0-9,11-19,21-27,56-83"}, - {"27,83", "27,83", "0-9,11-19,21-27,56-83"}, - {"28,84", "28,84", "28-55,84-111"}, - {"29,85", "29,85", "28-55,84-111"}, - {"30,86", "30,86", "28-55,84-111"}, - {"31,87", "31,87", "28-55,84-111"}, - {"32,88", "32,88", "28-55,84-111"}, - {"33,89", "33,89", "28-55,84-111"}, - {"34,90", "34,90", "28-55,84-111"}, - {"35,91", "35,91", "28-55,84-111"}, - {"36,92", "36,92", "28-55,84-111"}, - {"37,93", "37,93", "28-55,84-111"}, - {"38,94", "38,94", "28-55,84-111"}, - {"39,95", "39,95", "28-55,84-111"}, - {"40,96", "40,96", "28-55,84-111"}, - {"41,97", "41,97", "28-55,84-111"}, - {"42,98", "42,98", "28-55,84-111"}, - {"43,99", "43,99", "28-55,84-111"}, - {"44,100", "44,100", "28-55,84-111"}, - {"45,101", "45,101", "28-55,84-111"}, - {"46,102", "46,102", "28-55,84-111"}, - {"47,103", "47,103", "28-55,84-111"}, - {"48,104", "48,104", "28-55,84-111"}, - {"49,105", "49,105", "28-55,84-111"}, - {"50,106", "50,106", "28-55,84-111"}, - {"51,107", "51,107", "28-55,84-111"}, - {"52,108", "52,108", "28-55,84-111"}, - {"53,109", "53,109", "28-55,84-111"}, - {"54,110", "54,110", "28-55,84-111"}, - {"55,111", "55,111", "28-55,84-111"}, - }, - { - {"0-9,11-19,21-27,56-83"}, - {"28-55,84-111"}, - }, -}; LinuxCpuMapTestCase cache_2sockets_48cores_hyperthreading = { 96, 2, @@ -1187,36 +1005,6 @@ LinuxCpuMapTestCase cache_2sockets_20cores_hyperthreading_1 = { }, {}, }; -LinuxCpuMapTestCase cache_1sockets_16cores_hyperthreading = { - 20, - 1, - 1, - 14, - {{20, 6, 8, 6, 0, 0}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, - {2, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {3, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, - {4, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {5, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, - {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 0, 0, 3, 
MAIN_CORE_PROC, 3, -1}, - {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, - {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, - {12, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 6, -1}, - {14, 0, 0, 8, EFFICIENT_CORE_PROC, 6, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 6, -1}, - {16, 0, 0, 10, EFFICIENT_CORE_PROC, 7, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 7, -1}, - {18, 0, 0, 12, EFFICIENT_CORE_PROC, 7, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 7, -1}, - }, - { - {"0,5", "0,5", "0-19"}, {"1-2", "1-2", "0-19"}, {"1-2", "1-2", "0-19"}, {"3-4", "3-4", "0-19"}, - {"3-4", "3-4", "0-19"}, {"0,5", "0,5", "0-19"}, {"6-7", "6-7", "0-19"}, {"6-7", "6-7", "0-19"}, - {"8-9", "8-9", "0-19"}, {"8-9", "8-9", "0-19"}, {"10-11", "10-11", "0-19"}, {"10-11", "10-11", "0-19"}, - {"12", "12-15", "0-19"}, {"13", "12-15", "0-19"}, {"14", "12-15", "0-19"}, {"15", "12-15", "0-19"}, - {"16", "16-19", "0-19"}, {"17", "16-19", "0-19"}, {"18", "16-19", "0-19"}, {"19", "16-19", "0-19"}, - {"20", "20-21", ""}, {"21", "20-21", ""}, - }, - { - {"0-21"}, - }, -}; LinuxCpuMapTestCase cache_1sockets_14cores_hyperthreading = { 20, 1, @@ -1347,36 +1135,6 @@ LinuxCpuMapTestCase cache_1sockets_8cores_hyperthreading = { }, {{"0-11"}}, }; -LinuxCpuMapTestCase cache_1sockets_8cores_hyperthreading_1 = { - 8, - 1, - 1, - 8, - {{8, 4, 4, 0, 0, 0}}, - { - {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, - {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, - {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, - {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, - {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, - {5, 0, 0, 5, EFFICIENT_CORE_PROC, 4, -1}, - {6, 0, 0, 6, EFFICIENT_CORE_PROC, 4, -1}, - {7, 0, 0, 7, EFFICIENT_CORE_PROC, 4, -1}, - }, - { - {"0", "0", "0-3"}, - {"1", "1", "0-3"}, - {"2", "2", "0-3"}, - {"3", "3", "0-3"}, - {"4", "4-7", ""}, - {"5", "4-7", ""}, - {"6", "4-7", ""}, - {"7", "4-7", ""}, - }, - { - {"0-7"}, - }, -}; LinuxCpuMapTestCase 
cache_1sockets_6cores_hyperthreading = { 12, 1, @@ -1462,7 +1220,6 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapCacheParserTests, testing::Values(cache_2sockets_104cores_hyperthreading, cache_1sockets_96cores, - cache_2sockets_56cores_hyperthreading, cache_2sockets_48cores_hyperthreading, cache_2sockets_48cores_hyperthreading_1, cache_2sockets_24cores_hyperthreading, @@ -1472,12 +1229,10 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, cache_2sockets_48cores_2, cache_2sockets_20cores_hyperthreading, cache_2sockets_20cores_hyperthreading_1, - cache_1sockets_16cores_hyperthreading, cache_1sockets_14cores_hyperthreading, cache_1sockets_14cores_hyperthreading_1, cache_1sockets_10cores_hyperthreading, cache_1sockets_8cores_hyperthreading, - cache_1sockets_8cores_hyperthreading_1, cache_1sockets_6cores_hyperthreading, cache_1sockets_4cores, cache_VM_cache_0)); diff --git a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp index 8ccdfad011d19c..04ab617961b953 100644 --- a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp @@ -258,188 +258,6 @@ LinuxCpuMapTestCase freq_2sockets_112cores_hyperthreading = { }, // param[in]: The CPU frequency information table of this simulated platform {{"0-55,112-167"}, {"56-111,168-223"}}, // param[in]: The numa node information table of this simulated platform }; -LinuxCpuMapTestCase freq_2sockets_56cores_hyperthreading = { - 110, - 2, - 2, - 56, - {{110, 56, 0, 54, -1, -1}, {54, 28, 0, 26, 0, 0}, {56, 28, 0, 28, 1, 1}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, - {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, - {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, - {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, - {8, 0, 0, 8, 
HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, - {11, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {12, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, - {13, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {14, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, - {15, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {16, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, - {17, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {18, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, - {19, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {21, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, - {22, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {23, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, - {24, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {25, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, - {26, 0, 0, 24, HYPER_THREADING_PROC, 24, -1}, {27, 0, 0, 25, HYPER_THREADING_PROC, 25, -1}, - {28, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {29, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, - {30, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {31, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, - {32, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {33, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, - {34, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {35, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, - {36, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {37, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, - {38, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {39, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, - {40, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {41, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, - {42, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {43, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, - {44, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {45, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, - {46, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {47, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, - {48, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {49, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, - {50, 1, 1, 48, HYPER_THREADING_PROC, 48, -1}, {51, 1, 1, 49, HYPER_THREADING_PROC, 49, -1}, - {52, 1, 1, 50, HYPER_THREADING_PROC, 50, 
-1}, {53, 1, 1, 51, HYPER_THREADING_PROC, 51, -1}, - {54, 1, 1, 52, HYPER_THREADING_PROC, 52, -1}, {55, 1, 1, 53, HYPER_THREADING_PROC, 53, -1}, - {56, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {57, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, - {58, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {59, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, - {60, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {61, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, - {62, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {63, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, - {64, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {65, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, - {66, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {67, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, - {68, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, {69, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, - {70, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, {71, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, - {72, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, {73, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, - {74, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, {75, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, - {76, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, {77, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, - {78, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {79, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, - {80, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {81, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, - {82, 0, 0, 24, MAIN_CORE_PROC, 24, -1}, {83, 0, 0, 25, MAIN_CORE_PROC, 25, -1}, - {84, 1, 1, 26, MAIN_CORE_PROC, 26, -1}, {85, 1, 1, 27, MAIN_CORE_PROC, 27, -1}, - {86, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {87, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, - {88, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {89, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, - {90, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {91, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, - {92, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {93, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, - {94, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {95, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, - {96, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {97, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, - {98, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {99, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, - {100, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {101, 1, 1, 43, 
MAIN_CORE_PROC, 43, -1}, - {102, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {103, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, - {104, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {105, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, - {106, 1, 1, 48, MAIN_CORE_PROC, 48, -1}, {107, 1, 1, 49, MAIN_CORE_PROC, 49, -1}, - {108, 1, 1, 50, MAIN_CORE_PROC, 50, -1}, {109, 1, 1, 51, MAIN_CORE_PROC, 51, -1}, - {110, 1, 1, 52, MAIN_CORE_PROC, 52, -1}, {111, 1, 1, 53, MAIN_CORE_PROC, 53, -1}, - }, - { - {"0,56", "0", "3500000"}, - {"1,57", "0", "3500000"}, - {"2,58", "0", "3500000"}, - {"3,59", "0", "3500000"}, - {"4,60", "0", "3500000"}, - {"5,61", "0", "3500000"}, - {"6,62", "0", "3500000"}, - {"7,63", "0", "3500000"}, - {"8,64", "0", "3500000"}, - {"9,65", "0", "3500000"}, - {"", "", ""}, - {"11,67", "0", "3500000"}, - {"12,68", "0", "3500000"}, - {"13,69", "0", "3500000"}, - {"14,70", "0", "3500000"}, - {"15,71", "0", "3500000"}, - {"16,72", "0", "3500000"}, - {"17,73", "0", "3500000"}, - {"18,74", "0", "3500000"}, - {"19,75", "0", "3500000"}, - {"", "", ""}, - {"21,77", "0", "3500000"}, - {"22,78", "0", "3500000"}, - {"23,79", "0", "3500000"}, - {"24,80", "0", "3500000"}, - {"25,81", "0", "3500000"}, - {"26,82", "0", "3500000"}, - {"27,83", "0", "3500000"}, - {"28,84", "1", "3500000"}, - {"29,85", "1", "3500000"}, - {"30,86", "1", "3500000"}, - {"31,87", "1", "3500000"}, - {"32,88", "1", "3500000"}, - {"33,89", "1", "3500000"}, - {"34,90", "1", "3500000"}, - {"35,91", "1", "3500000"}, - {"36,92", "1", "3500000"}, - {"37,93", "1", "3500000"}, - {"38,94", "1", "3500000"}, - {"39,95", "1", "3500000"}, - {"40,96", "1", "3500000"}, - {"41,97", "1", "3500000"}, - {"42,98", "1", "3500000"}, - {"43,99", "1", "3500000"}, - {"44,100", "1", "3500000"}, - {"45,101", "1", "3500000"}, - {"46,102", "1", "3500000"}, - {"47,103", "1", "3500000"}, - {"48,104", "1", "3500000"}, - {"49,105", "1", "3500000"}, - {"50,106", "1", "3500000"}, - {"51,107", "1", "3500000"}, - {"52,108", "1", "3500000"}, - {"53,109", "1", "3500000"}, - 
{"54,110", "1", "3500000"}, - {"55,111", "1", "3500000"}, - {"0,56", "0", "3500000"}, - {"1,57", "0", "3500000"}, - {"2,58", "0", "3500000"}, - {"3,59", "0", "3500000"}, - {"4,60", "0", "3500000"}, - {"5,61", "0", "3500000"}, - {"6,62", "0", "3500000"}, - {"7,63", "0", "3500000"}, - {"8,64", "0", "3500000"}, - {"9,65", "0", "3500000"}, - {"66", "0", "3500000"}, - {"11,67", "0", "3500000"}, - {"12,68", "0", "3500000"}, - {"13,69", "0", "3500000"}, - {"14,70", "0", "3500000"}, - {"15,71", "0", "3500000"}, - {"16,72", "0", "3500000"}, - {"17,73", "0", "3500000"}, - {"18,74", "0", "3500000"}, - {"19,75", "0", "3500000"}, - {"76", "0", "3500000"}, - {"21,77", "0", "3500000"}, - {"22,78", "0", "3500000"}, - {"23,79", "0", "3500000"}, - {"24,80", "0", "3500000"}, - {"25,81", "0", "3500000"}, - {"26,82", "0", "3500000"}, - {"27,83", "0", "3500000"}, - {"28,84", "1", "3500000"}, - {"29,85", "1", "3500000"}, - {"30,86", "1", "3500000"}, - {"31,87", "1", "3500000"}, - {"32,88", "1", "3500000"}, - {"33,89", "1", "3500000"}, - {"34,90", "1", "3500000"}, - {"35,91", "1", "3500000"}, - {"36,92", "1", "3500000"}, - {"37,93", "1", "3500000"}, - {"38,94", "1", "3500000"}, - {"39,95", "1", "3500000"}, - {"40,96", "1", "3500000"}, - {"41,97", "1", "3500000"}, - {"42,98", "1", "3500000"}, - {"43,99", "1", "3500000"}, - {"44,100", "1", "3500000"}, - {"45,101", "1", "3500000"}, - {"46,102", "1", "3500000"}, - {"47,103", "1", "3500000"}, - {"48,104", "1", "3500000"}, - {"49,105", "1", "3500000"}, - {"50,106", "1", "3500000"}, - {"51,107", "1", "3500000"}, - {"52,108", "1", "3500000"}, - {"53,109", "1", "3500000"}, - {"54,110", "1", "3500000"}, - {"55,111", "1", "3500000"}, - }, - { - {"0-9,11-19,21-27,56-83"}, - {"28-55,84-111"}, - }, -}; LinuxCpuMapTestCase freq_2sockets_48cores_hyperthreading = { 96, 2, @@ -1169,7 +987,6 @@ TEST_P(LinuxCpuMapFreqParserTests, LinuxFreq) {} INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapFreqParserTests, testing::Values(freq_2sockets_112cores_hyperthreading, 
- freq_2sockets_56cores_hyperthreading, freq_2sockets_48cores_hyperthreading, freq_2sockets_48cores_hyperthreading_1, freq_2sockets_24cores_hyperthreading, From f5b85f1e70c1f49c8d06bb4036b484a9ce751283 Mon Sep 17 00:00:00 2001 From: Pavel Durandin Date: Thu, 19 Dec 2024 12:28:29 +0400 Subject: [PATCH 14/60] GPU: Cache encryption doc (#28137) ### Details: Cache encryption doc - following of https://github.com/openvinotoolkit/openvino/pull/28035 rolled back - to add GPU availability check for GHA in c++ and python snippets --------- Co-authored-by: Tomasz Krupa Co-authored-by: Sebastian Golebiewski --- .../assets/snippets/ov_caching.cpp | 38 ++++++++++++++++++- .../articles_en/assets/snippets/ov_caching.py | 18 +++++++++ .../model-caching-overview.rst | 22 ++++++++++- 3 files changed, 75 insertions(+), 3 deletions(-) diff --git a/docs/articles_en/assets/snippets/ov_caching.cpp b/docs/articles_en/assets/snippets/ov_caching.cpp index aa08a739261b81..f3113438e20642 100644 --- a/docs/articles_en/assets/snippets/ov_caching.cpp +++ b/docs/articles_en/assets/snippets/ov_caching.cpp @@ -90,6 +90,41 @@ auto compiled = core.compile_model(model, device, config); // Step 5: } } +void part5() { + std::string modelPath = "/tmp/myModel.xml"; + std::string device = "GPU"; + ov::Core core; // Step 1: create ov::Core object + bool hasGPU = false; // Step 1a: Check if GPU is available + auto devices = core.get_available_devices(); + for (auto&& supported : devices) { + hasGPU |= supported.find(device) != std::string::npos; + } + if(!hasGPU) { + return; + } + core.set_property(ov::cache_dir("/path/to/cache/dir")); // Step 1b: Enable caching +//! 
[ov:caching:part5] +static const char codec_key[] = {0x30, 0x60, 0x70, 0x02, 0x04, 0x08, 0x3F, 0x6F, 0x72, 0x74, 0x78, 0x7F}; +auto codec_xor = [&](const std::string& source_str) { + auto key_size = sizeof(codec_key); + int key_idx = 0; + std::string dst_str = source_str; + for (char& c : dst_str) { + c ^= codec_key[key_idx % key_size]; + key_idx++; + } + return dst_str; +}; +auto compiled = core.compile_model(modelPath, + device, + ov::cache_encryption_callbacks(ov::EncryptionCallbacks{codec_xor, codec_xor}), + ov::cache_mode(ov::CacheMode::OPTIMIZE_SIZE)); // Step 5: Compile model +//! [ov:caching:part5] + if (!compiled) { + throw std::runtime_error("error"); + } +} + int main() { try { part0(); @@ -97,7 +132,8 @@ int main() { part2(); part3(); part4(); + part5(); } catch (...) { } return 0; -} \ No newline at end of file +} diff --git a/docs/articles_en/assets/snippets/ov_caching.py b/docs/articles_en/assets/snippets/ov_caching.py index 57bd72f3f9b80b..b4534ebcd2d9c3 100644 --- a/docs/articles_en/assets/snippets/ov_caching.py +++ b/docs/articles_en/assets/snippets/ov_caching.py @@ -59,3 +59,21 @@ def decrypt_base64(src): model = core.read_model(model=model_path) compiled_model = core.compile_model(model=model, device_name=device_name, config=config_cache) # ! [ov:caching:part4] + +# ! [ov:caching:part5] +import base64 + +def encrypt_base64(src): + return base64.b64encode(bytes(src, "utf-8")) + +def decrypt_base64(src): + return base64.b64decode(bytes(src, "utf-8")) + +core = ov.Core() +if "GPU" in core.available_devices: + core.set_property({props.cache_dir: path_to_cache_dir}) + config_cache = {} + config_cache["CACHE_ENCRYPTION_CALLBACKS"] = [encrypt_base64, decrypt_base64] + config_cache["CACHE_MODE"] = "OPTIMIZE_SIZE" + compiled_model = core.compile_model(model=model_path, device_name='GPU', config=config_cache) +# ! 
[ov:caching:part5] diff --git a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst index 181622ff55baf1..b3253f775bdb02 100644 --- a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst +++ b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst @@ -139,7 +139,7 @@ To check in advance if a particular device supports model caching, your applicat Set "cache_encryption_callbacks" config option to enable cache encryption +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -If model caching is enabled, the model topology can be encrypted when saving to the cache and decrypted when loading from the cache. This property can currently be set only in ``compile_model``. +If model caching is enabled in the CPU Plugin, the model topology can be encrypted while it is saved to the cache and decrypted when it is loaded from the cache. Currently, this property can be set only in ``compile_model``. .. tab-set:: @@ -157,6 +157,24 @@ If model caching is enabled, the model topology can be encrypted when saving to :language: cpp :fragment: [ov:caching:part4] +If model caching is enabled in the GPU Plugin, the model topology can be encrypted while it is saved to the cache and decrypted when it is loaded from the cache. Full encryption only works when the ``CacheMode`` property is set to ``OPTIMIZE_SIZE``. + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. doxygensnippet:: docs/articles_en/assets/snippets/ov_caching.py + :language: py + :fragment: [ov:caching:part5] + + .. tab-item:: C++ + :sync: cpp + + .. doxygensnippet:: docs/articles_en/assets/snippets/ov_caching.cpp + :language: cpp + :fragment: [ov:caching:part5] + .. 
important:: - Currently, this property is supported only by the CPU plugin. For other HW plugins, setting this property will not encrypt/decrypt the model topology in cache and will not affect performance. + Currently, this property is supported only by the CPU and GPU plugins. For other HW plugins, setting this property will not encrypt/decrypt the model topology in cache and will not affect performance. From a2b00ece6c84f018f7e7f12185dc2f76f9dd89da Mon Sep 17 00:00:00 2001 From: Arshad Mehmood Date: Thu, 19 Dec 2024 16:37:14 +0800 Subject: [PATCH 15/60] [GPU] Enabled uint8_t input for ArgMinMax ensuring TF test compliance (#27971) ### Details: This update introduces support for the uint8_t data type in the argminmax implementation on the cldnn path, which was previously lacking. Specifically, for the TopKV2 test case, the OpenVino IR optimization reduced the model to an argminmax primitive that did not support uint8_t inputs, causing failures. With the new uint8_t support for argminmax and the addition of a kernel, the test now passes. An OpenVino unit test has been added to verify uint8_t data type handling, with test inputs consisting of non-negative values. 
### Tickets: - CVS-153078 - CVS-156587 Signed-off-by: Arshad Mehmood --- .../src/graph/impls/ocl/arg_max_min.cpp | 2 +- .../arg_max_min/arg_max_min_kernel_axis.cpp | 1 + .../arg_max_min_kernel_gpu_ref.cpp | 1 + .../arg_max_min/arg_max_min_kernel_opt.cpp | 1 + .../unit/test_cases/arg_max_gpu_test.cpp | 34 +++++++++++++++++-- .../tensorflow_tests/test_tf_ArgMinMax.py | 2 -- .../tensorflow_tests/test_tf_TopKV2.py | 2 -- 7 files changed, 35 insertions(+), 8 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp index dd1e8d256860d7..496d38d9b44210 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp @@ -131,7 +131,7 @@ struct arg_max_min_impl : typed_primitive_impl_ocl { namespace detail { attach_arg_max_min_impl::attach_arg_max_min_impl() { - auto types = {data_types::f16, data_types::f32, data_types::i8, data_types::i32}; + auto types = {data_types::f16, data_types::f32, data_types::i8, data_types::i32, data_types::u8}; auto formats = { format::bfyx, diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_axis.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_axis.cpp index ecb6be6f17020d..4cedb9a3c7b6c7 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_axis.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_axis.cpp @@ -52,6 +52,7 @@ ParamsKey ArgMaxMinKernelAxis::GetSupportedKey() const { k.EnableInputDataType(Datatype::F16); k.EnableInputDataType(Datatype::F32); k.EnableInputDataType(Datatype::INT8); + k.EnableInputDataType(Datatype::UINT8); k.EnableInputDataType(Datatype::INT32); k.EnableAllOutputDataType(); k.EnableInputLayout(DataLayout::bfyx); diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_gpu_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_gpu_ref.cpp index 991edfcf093383..26b45f220968b2 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_gpu_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_gpu_ref.cpp @@ -10,6 +10,7 @@ ParamsKey ArgMaxMinKernelGPURef::GetSupportedKey() const { k.EnableInputDataType(Datatype::F16); k.EnableInputDataType(Datatype::F32); k.EnableInputDataType(Datatype::INT8); + k.EnableInputDataType(Datatype::UINT8); k.EnableAllOutputDataType(); k.EnableInputLayout(DataLayout::bfyx); k.EnableInputLayout(DataLayout::yxfb); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_opt.cpp index 5f31efdd089b7c..5216a9f53de7e8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/arg_max_min/arg_max_min_kernel_opt.cpp @@ -10,6 +10,7 @@ ParamsKey ArgMaxMinKernelOpt::GetSupportedKey() const { k.EnableInputDataType(Datatype::F16); k.EnableInputDataType(Datatype::F32); k.EnableInputDataType(Datatype::INT8); + k.EnableInputDataType(Datatype::UINT8); k.EnableOutputDataType(Datatype::F32); k.EnableInputLayout(DataLayout::bfyx); k.EnableOutputLayout(DataLayout::bfyx); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp index eb532b2357f1da..ea65a864020e73 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp @@ -16,7 +16,6 @@ using namespace cldnn; using namespace ::tests; - template struct arg_max_input_types { static const 
auto format = layoutFormat; @@ -56,10 +55,21 @@ using format_types = testing::Types, arg_max_input_types, arg_max_input_types, arg_max_input_types, - arg_max_input_types>; + arg_max_input_types, + arg_max_input_types, + arg_max_input_types, + arg_max_input_types, + arg_max_input_types>; TYPED_TEST_SUITE(argmax_gpu_test, format_types); +// Helper trait to check for uint8_t input_type +template +struct is_uint8_input : std::false_type {}; + +template +struct is_uint8_input> : std::true_type {}; + TYPED_TEST(argmax_gpu_test, base) { // Input : 2x4x2x2 static const int32_t x_size = 2, y_size = 2, feature_num = 4, batch_num = 2; @@ -82,7 +92,25 @@ TYPED_TEST(argmax_gpu_test, base) { /*b1f1*/ 4.f, 0.5f, 8.f, 8.2f, /*b1f2*/ 0.2f, 0.2f, -10.f, 5.2f, /*b1f3*/ 4.f, 0.5f, 8.f, 8.2f}; - set_values(input, this->getTypedVector(input_vec)); + + // Positive values for u8 input type test + std::vector input_vec_u8 = {// y0x0 y0x1 y1x0 y1x1 + /*b0f0*/ 0.1f, 0.1f, 0.9f, 1.5f, + /*b0f1*/ 0.2f, 0.2f, 0.1f, 5.2f, + /*b0f2*/ 0.2f, 0.2f, 0.1f, 5.2f, + /*b0f3*/ 0.2f, 0.2f, 0.1f, 4.2f, + + /*b1f0*/ 3.f, 0.5f, 7.f, 10.f, + /*b1f1*/ 4.f, 0.5f, 8.f, 8.2f, + /*b1f2*/ 0.2f, 0.2f, 0.1f, 5.2f, + /*b1f3*/ 4.f, 0.5f, 8.f, 8.2f}; + + // If format is of type u8 then use non negative values as input. 
+ if (is_uint8_input::value) { + set_values(input, this->getTypedVector(input_vec_u8)); + } else { + set_values(input, this->getTypedVector(input_vec)); + } network network(engine, topology, get_test_default_config(engine)); diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py index 785ef72a60f3a1..5ea8b8e65086a3 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py @@ -69,8 +69,6 @@ def test_argmin_max_net(self, input_shape, dimension, input_type, output_type, o ie_device, precision, ir_version, temp_dir, use_legacy_frontend): if platform.machine() in ['aarch64', 'arm64', 'ARM64']: pytest.skip('153077: Segmentation fault on ARM') - if ie_device == 'GPU' and input_type == np.uint8: - pytest.skip('153078: No layout format available for topk') if ie_device == 'GPU' and input_type == np.float32 and input_shape == [10, 15, 20]: pytest.skip('153079: Accuracy error on GPU') self._test(*self.create_argmin_max_net(input_shape=input_shape, dimension=dimension, diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py b/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py index 23d5c6bf2c23fe..65efbe7c6b8bd2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py @@ -55,8 +55,6 @@ def create_topk_v2_net(self, input_shape, input_type, k, k_type, sorted, index_t def test_topk_v2(self, input_shape, input_type, k, k_type, sorted, index_type, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): - if ie_device == 'GPU' and input_type == np.uint8: - pytest.skip('156587: Check correct_layout_selected failed for input uint8 on GPU') if platform.machine() in ['arm', 'armv7l', 'aarch64', 'arm64', 'ARM64'] and \ input_type in [np.int32, np.uint8, np.int16, np.int8, np.int64, np.uint16, np.uint32, np.uint64]: From 
0e2edd87493e3bbe9b2373e4d50f1514ecbffadf Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Thu, 19 Dec 2024 10:30:31 +0000 Subject: [PATCH 16/60] [CI] [GHA] Setup pip cache directory per Python on Windows; do not use cache for `setup-python` action (#28015) ### Tickets: - *155118* - *158400* - *158574* - *159198* --- .github/actions/setup_python/action.yml | 5 +---- .../scripts/workflow_rerun/errors_to_look_for.json | 4 ++++ .github/workflows/job_build_windows.yml | 14 +++++++++++--- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/.github/actions/setup_python/action.yml b/.github/actions/setup_python/action.yml index 96968f55636df9..d1290508ab778f 100644 --- a/.github/actions/setup_python/action.yml +++ b/.github/actions/setup_python/action.yml @@ -22,7 +22,6 @@ inputs: runs: using: 'composite' steps: - - name: Check if Python is already installed (Linux) if: ${{ runner.os == 'Linux' }} shell: bash @@ -54,13 +53,11 @@ runs: with: python-version: ${{ inputs.version }} - - if: ${{ runner.os == 'macOS' || runner.os == 'Windows' || (runner.os == 'Linux' && runner.arch != 'ARM64' && steps.check_python.outputs.installed == 'false' ) }} + - if: ${{ runner.os == 'macOS' || runner.os == 'Windows' || (runner.os == 'Linux' && runner.arch != 'ARM64' && steps.check_python.outputs.installed == 'false') }} name: Setup Python ${{ inputs.version }} uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: ${{ inputs.version }} - env: - PIP_CACHE_DIR: ${{ inputs.self-hosted-runner == 'true' && inputs.pip-cache-path || '' }} - if: ${{ inputs.should-setup-pip-paths == 'true' && runner.os != 'Windows' }} name: Setup pip variables (cache and install path) diff --git a/.github/scripts/workflow_rerun/errors_to_look_for.json b/.github/scripts/workflow_rerun/errors_to_look_for.json index ad771e9d51f75d..b9cac8f17adaa6 100644 --- a/.github/scripts/workflow_rerun/errors_to_look_for.json +++ 
b/.github/scripts/workflow_rerun/errors_to_look_for.json @@ -82,5 +82,9 @@ { "error_text": "Upload progress stalled", "ticket": 152933 + }, + { + "error_text": "because the GET request got Content-Type", + "ticket": 158400 } ] \ No newline at end of file diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index 4e3969d978cb83..d5d42ffcfea8d2 100644 --- a/.github/workflows/job_build_windows.yml +++ b/.github/workflows/job_build_windows.yml @@ -222,12 +222,20 @@ jobs: $pythonCommand = "py -$pyVersion -c `"import sys; print(f'{sys.executable}')`"" $pythonExecutablePath = & cmd /c $pythonCommand - + + $pipVersion = & $pythonExecutablePath -c "import pip; print(pip.__version__)" + Write-Host "Using pip version: $pipVersion for $pyVersion" + $env:PIP_CACHE_DIR="${{ env.PIP_CACHE_PATH }}/$pipVersion" + & $pythonExecutablePath -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt - cmake -DPython3_EXECUTABLE="$pythonExecutablePath" -DOpenVINODeveloperPackage_DIR=${{ env.BUILD_DIR }} -S ${{ env.OPENVINO_REPO }}/src/bindings/python -B "$pyBuildDir" - cmake --build "$pyBuildDir" --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + cmake -DPython3_EXECUTABLE="$pythonExecutablePath" -DOpenVINODeveloperPackage_DIR=${{ env.BUILD_DIR }} -S ${{ env.OPENVINO_REPO }}/src/bindings/python -B "$pyBuildDir" && + cmake --build "$pyBuildDir" --parallel --config ${{ env.CMAKE_BUILD_TYPE }} && cmake --install "$pyBuildDir" --config ${{ env.CMAKE_BUILD_TYPE }} --prefix ${{ env.INSTALL_WHEELS_DIR }} --component python_wheels + if ($LASTEXITCODE -ne 0) { + Write-Host "Failed to build Python wheels for Python $pyVersion" + exit 1 + } } - name: Pack Artifacts From 220633e9037a57f9a44ef6f0893fe59a310d3425 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 19 Dec 2024 12:14:57 +0100 Subject: [PATCH 17/60] [DOCS] Updating Versions of Archive Installation Packages (#28104) (#28143) port: 
https://github.com/openvinotoolkit/openvino/pull/28104 Signed-off-by: sgolebiewski-intel Co-authored-by: Sebastian Golebiewski --- .../install-openvino-archive-linux.rst | 52 +++++++++---------- .../install-openvino-archive-macos.rst | 20 +++---- .../install-openvino-archive-windows.rst | 18 +++---- .../install-openvino-genai.rst | 26 +++++----- 4 files changed, 58 insertions(+), 58 deletions(-) diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst b/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst index 77b23ca9b2d6a4..7224d63d0380b9 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-archive-linux.rst @@ -58,7 +58,7 @@ Step 1: Download and Install the OpenVINO Core Components cd /Downloads -4. Download the `OpenVINO Runtime archive file for your system `_, extract the files, rename the extracted folder and move it to the desired path: +4. Download the `OpenVINO Runtime archive file for your system `_, extract the files, rename the extracted folder and move it to the desired path: .. tab-set:: @@ -73,9 +73,9 @@ Step 1: Download and Install the OpenVINO Core Components .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_ubuntu24_2024.5.0.17288.7975fa5da0c_x86_64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_ubuntu24_2024.5.0.17288.7975fa5da0c_x86_64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_ubuntu24_2024.6.0.17404.4c0f47d2335_x86_64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_ubuntu24_2024.6.0.17404.4c0f47d2335_x86_64 /opt/intel/openvino_2024.6.0 .. 
tab-item:: Ubuntu 22.04 :sync: ubuntu-22 @@ -83,9 +83,9 @@ Step 1: Download and Install the OpenVINO Core Components .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_ubuntu22_2024.5.0.17288.7975fa5da0c_x86_64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_ubuntu22_2024.5.0.17288.7975fa5da0c_x86_64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_ubuntu22_2024.6.0.17404.4c0f47d2335_x86_64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_ubuntu22_2024.6.0.17404.4c0f47d2335_x86_64 /opt/intel/openvino_2024.6.0 .. tab-item:: Ubuntu 20.04 :sync: ubuntu-20 @@ -93,9 +93,9 @@ Step 1: Download and Install the OpenVINO Core Components .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_ubuntu20_2024.5.0.17288.7975fa5da0c_x86_64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_ubuntu20_2024.5.0.17288.7975fa5da0c_x86_64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_ubuntu20_2024.6.0.17404.4c0f47d2335_x86_64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_ubuntu20_2024.6.0.17404.4c0f47d2335_x86_64 /opt/intel/openvino_2024.6.0 .. tab-item:: RHEL 8 :sync: rhel-8 @@ -103,18 +103,18 @@ Step 1: Download and Install the OpenVINO Core Components .. 
code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_rhel8_2024.5.0.17288.7975fa5da0c_x86_64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_rhel8_2024.5.0.17288.7975fa5da0c_x86_64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_rhel8_2024.6.0.17404.4c0f47d2335_x86_64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_rhel8_2024.6.0.17404.4c0f47d2335_x86_64 /opt/intel/openvino_2024.6.0 .. tab-item:: CentOS 7 :sync: centos-7 .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_centos7_2024.5.0.17288.7975fa5da0c_x86_64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_centos7_2024.5.0.17288.7975fa5da0c_x86_64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_centos7_2024.6.0.17404.4c0f47d2335_x86_64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_centos7_2024.6.0.17404.4c0f47d2335_x86_64 /opt/intel/openvino_2024.6.0 .. tab-item:: ARM 64-bit @@ -122,25 +122,25 @@ Step 1: Download and Install the OpenVINO Core Components .. 
code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_ubuntu20_2024.5.0.17288.7975fa5da0c_arm64.tgz -O openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_ubuntu20_2024.5.0.17288.7975fa5da0c_arm64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_ubuntu20_2024.6.0.17404.4c0f47d2335_arm64.tgz -O openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_ubuntu20_2024.6.0.17404.4c0f47d2335_arm64 /opt/intel/openvino_2024.6.0 .. tab-item:: ARM 32-bit :sync: arm-32 .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/linux/l_openvino_toolkit_debian10_2024.5.0.17288.7975fa5da0c_armhf.tgz -O openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv l_openvino_toolkit_debian10_2024.5.0.17288.7975fa5da0c_armhf /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux/l_openvino_toolkit_debian10_2024.6.0.17404.4c0f47d2335_armhf.tgz -O openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv l_openvino_toolkit_debian10_2024.6.0.17404.4c0f47d2335_armhf /opt/intel/openvino_2024.6.0 5. Install required system dependencies on Linux. To do this, OpenVINO provides a script in the extracted installation directory. Run the following command: .. code-block:: sh - cd /opt/intel/openvino_2024.5.0 + cd /opt/intel/openvino_2024.6.0 sudo -E ./install_dependencies/install_openvino_dependencies.sh 6. (Optional) Install *numpy* Python Library: @@ -149,11 +149,11 @@ Step 1: Download and Install the OpenVINO Core Components This step is required only when you decide to use Python API. 
- You can use the ``requirements.txt`` file from the ``/opt/intel/openvino_2024.5.0/python`` folder: + You can use the ``requirements.txt`` file from the ``/opt/intel/openvino_2024.6.0/python`` folder: .. code-block:: sh - cd /opt/intel/openvino_2024.5.0 + cd /opt/intel/openvino_2024.6.0 python3 -m pip install -r ./python/requirements.txt 7. For simplicity, it is useful to create a symbolic link as below: @@ -162,7 +162,7 @@ Step 1: Download and Install the OpenVINO Core Components cd /opt/intel - sudo ln -s openvino_2024.5.0 openvino_2024 + sudo ln -s openvino_2024.6.0 openvino_2024 .. note:: If you have already installed a previous release of OpenVINO 2024, a symbolic link to the ``openvino_2024`` folder may already exist. diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst b/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst index b02d7f4f1984fc..0cf2f5f31548dc 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-archive-macos.rst @@ -47,7 +47,7 @@ Step 1: Install OpenVINO Core Components cd /Downloads -4. Download the `OpenVINO Runtime archive file for macOS `__, extract the files, rename the extracted folder and move it to the desired path: +4. Download the `OpenVINO Runtime archive file for macOS `__, extract the files, rename the extracted folder and move it to the desired path: .. tab-set:: @@ -57,9 +57,9 @@ Step 1: Install OpenVINO Core Components .. 
code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/macos/m_openvino_toolkit_macos_12_6_2024.5.0.17288.7975fa5da0c_x86_64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv m_openvino_toolkit_macos_12_6_2024.5.0.17288.7975fa5da0c_x86_64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/macos/m_openvino_toolkit_macos_12_6_2024.6.0.17404.4c0f47d2335_x86_64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv m_openvino_toolkit_macos_12_6_2024.6.0.17404.4c0f47d2335_x86_64 /opt/intel/openvino_2024.6.0 .. tab-item:: ARM, 64-bit :sync: arm-64 @@ -67,9 +67,9 @@ Step 1: Install OpenVINO Core Components .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/macos/m_openvino_toolkit_macos_12_6_2024.5.0.17288.7975fa5da0c_arm64.tgz --output openvino_2024.5.0.tgz - tar -xf openvino_2024.5.0.tgz - sudo mv m_openvino_toolkit_macos_12_6_2024.5.0.17288.7975fa5da0c_arm64 /opt/intel/openvino_2024.5.0 + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/macos/m_openvino_toolkit_macos_12_6_2024.6.0.17404.4c0f47d2335_arm64.tgz --output openvino_2024.6.0.tgz + tar -xf openvino_2024.6.0.tgz + sudo mv m_openvino_toolkit_macos_12_6_2024.6.0.17404.4c0f47d2335_arm64 /opt/intel/openvino_2024.6.0 5. (Optional) Install *numpy* Python Library: @@ -78,11 +78,11 @@ Step 1: Install OpenVINO Core Components This step is required only when you decide to use Python API. - You can use the ``requirements.txt`` file from the ``/opt/intel/openvino_2024.5.0/python`` folder: + You can use the ``requirements.txt`` file from the ``/opt/intel/openvino_2024.6.0/python`` folder: .. code-block:: sh - cd /opt/intel/openvino_2024.5.0 + cd /opt/intel/openvino_2024.6.0 python3 -m pip install -r ./python/requirements.txt 6. 
For simplicity, it is useful to create a symbolic link as below: @@ -90,7 +90,7 @@ Step 1: Install OpenVINO Core Components .. code-block:: sh - sudo ln -s /opt/intel/openvino_2024.5.0 /opt/intel/openvino_2024 + sudo ln -s /opt/intel/openvino_2024.6.0 /opt/intel/openvino_2024 .. note:: diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst b/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst index bdcd89d6b195b1..52d1d6d4be0814 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-archive-windows.rst @@ -41,18 +41,18 @@ Step 1: Download and Install OpenVINO Core Components ``C:\Program Files (x86)\Intel`` is the recommended folder. You may also use a different path if desired or if you don't have administrator privileges on your computer. -2. Download the `OpenVINO Runtime archive file for Windows `__ to your local ``Downloads`` folder. +2. Download the `OpenVINO Runtime archive file for Windows `__ to your local ``Downloads`` folder. If you prefer using command-lines, run the following commands in the command prompt window you opened: .. code-block:: sh cd /Downloads - curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/windows/w_openvino_toolkit_windows_2024.5.0.17288.7975fa5da0c_x86_64.zip --output openvino_2024.5.0.zip + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows/w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64.zip --output openvino_2024.6.0.zip .. note:: - A ``.sha256`` file is provided together with the archive file to validate your download process. To do that, download the ``.sha256`` file from the same repository and run ``CertUtil -hashfile openvino_2024.5.0.zip SHA256``. 
Compare the returned value in the output with what's in the ``.sha256`` file: if the values are the same, you have downloaded the correct file successfully; if not, create a Support ticket `here `__. + A ``.sha256`` file is provided together with the archive file to validate your download process. To do that, download the ``.sha256`` file from the same repository and run ``CertUtil -hashfile openvino_2024.6.0.zip SHA256``. Compare the returned value in the output with what's in the ``.sha256`` file: if the values are the same, you have downloaded the correct file successfully; if not, create a Support ticket `here `__. 3. Use your favorite tool to extract the archive file, rename the extracted folder, and move it to the ``C:\Program Files (x86)\Intel`` directory. @@ -61,9 +61,9 @@ Step 1: Download and Install OpenVINO Core Components .. code-block:: sh - tar -xf openvino_2024.5.0.zip - ren w_openvino_toolkit_windows_2024.5.0.17288.7975fa5da0c_x86_64 openvino_2024.5.0 - move openvino_2024.5.0 "C:\Program Files (x86)\Intel" + tar -xf openvino_2024.6.0.zip + ren w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64 openvino_2024.6.0 + move openvino_2024.6.0 "C:\Program Files (x86)\Intel" 4. (Optional) Install *numpy* Python Library: @@ -72,11 +72,11 @@ Step 1: Download and Install OpenVINO Core Components This step is required only when you decide to use Python API. - You can use the ``requirements.txt`` file from the ``C:\Program Files (x86)\Intel\openvino_2024.5.0\python`` folder: + You can use the ``requirements.txt`` file from the ``C:\Program Files (x86)\Intel\openvino_2024.6.0\python`` folder: .. code-block:: sh - cd "C:\Program Files (x86)\Intel\openvino_2024.5.0" + cd "C:\Program Files (x86)\Intel\openvino_2024.6.0" python -m pip install -r .\python\requirements.txt @@ -85,7 +85,7 @@ Step 1: Download and Install OpenVINO Core Components .. 
code-block:: sh cd C:\Program Files (x86)\Intel - mklink /D openvino_2024 openvino_2024.5.0 + mklink /D openvino_2024 openvino_2024.6.0 .. note:: diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst index a10b0d0c7bbce4..bbfaa7817017ef 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst @@ -47,24 +47,24 @@ Linux .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/linux/openvino_genai_ubuntu24_2024.5.0.0_x86_64.tar.gz --output openvino_genai_2024.5.0.0.tgz - tar -xf openvino_genai_2024.5.0.0.tgz + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux/openvino_genai_ubuntu24_2024.6.0.0_x86_64.tar.gz --output openvino_genai_2024.6.0.0.tgz + tar -xf openvino_genai_2024.6.0.0.tgz .. tab-item:: Ubuntu 22.04 :sync: ubuntu-22 .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/linux/openvino_genai_ubuntu22_2024.5.0.0_x86_64.tar.gz --output openvino_genai_2024.5.0.0.tgz - tar -xf openvino_genai_2024.5.0.0.tgz + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux/openvino_genai_ubuntu22_2024.6.0.0_x86_64.tar.gz --output openvino_genai_2024.6.0.0.tgz + tar -xf openvino_genai_2024.6.0.0.tgz .. tab-item:: Ubuntu 20.04 :sync: ubuntu-20 .. 
code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/linux/openvino_genai_ubuntu20_2024.5.0.0_x86_64.tar.gz --output openvino_genai_2024.5.0.0.tgz - tar -xf openvino_genai_2024.5.0.0.tgz + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux/openvino_genai_ubuntu20_2024.6.0.0_x86_64.tar.gz --output openvino_genai_2024.6.0.0.tgz + tar -xf openvino_genai_2024.6.0.0.tgz .. tab-item:: ARM 64-bit @@ -72,8 +72,8 @@ Linux .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/linux/openvino_genai_ubuntu20_2024.5.0.0_arm64.tar.gz -O openvino_genai_2024.5.0.0.tgz - tar -xf openvino_genai_2024.5.0.0.tgz + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux/openvino_genai_ubuntu20_2024.6.0.0_arm64.tar.gz -O openvino_genai_2024.6.0.0.tgz + tar -xf openvino_genai_2024.6.0.0.tgz Windows @@ -82,7 +82,7 @@ Windows .. code-block:: sh cd /Downloads - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/windows/openvino_genai_windows_2024.5.0.0_x86_64.zip --output openvino_genai_2024.5.0.0.zip + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/windows/openvino_genai_windows_2024.6.0.0_x86_64.zip --output openvino_genai_2024.6.0.0.zip macOS ++++++++++++++++++++++++++ @@ -94,16 +94,16 @@ macOS .. code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/macos/openvino_genai_macos_12_6_2024.5.0.0_x86_64.tar.gz --output openvino_genai_2024.5.0.0.tgz - tar -xf openvino_genai_2024.5.0.0.tgz + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/macos/openvino_genai_macos_12_6_2024.6.0.0_x86_64.tar.gz --output openvino_genai_2024.6.0.0.tgz + tar -xf openvino_genai_2024.6.0.0.tgz .. tab-item:: ARM, 64-bit :sync: arm-64 .. 
code-block:: sh - curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.5/macos/openvino_genai_macos_12_6_2024.5.0.0_arm64.tar.gz --output openvino_genai_2024.5.0.0.tgz - tar -xf openvino_genai_2024.5.0.0.tgz + curl -L https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/macos/openvino_genai_macos_12_6_2024.6.0.0_arm64.tar.gz --output openvino_genai_2024.6.0.0.tgz + tar -xf openvino_genai_2024.6.0.0.tgz Here are the full guides: From 5ceba791165e0b9283f1449960982821615df131 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 19 Dec 2024 12:30:08 +0100 Subject: [PATCH 18/60] [DOCS] release notes tweak (#28145) port: https://github.com/openvinotoolkit/openvino/pull/28144 --- docs/articles_en/about-openvino/release-notes-openvino.rst | 3 +++ docs/sphinx_setup/index.rst | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index de233e6fa7cc9d..cdce4fe86f2aaa 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -1735,6 +1735,9 @@ Deprecated and to be removed in the future * “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the future. OpenVINO's dynamic shape models are recommended instead. +* Starting with 2025.0 MacOS x86 will no longer be recommended for use due to the discontinuation + of validation. Full support will be removed later in 2025. + * A number of notebooks have been deprecated. For an up-to-date listing of available notebooks, refer to the `OpenVINO™ Notebook index (openvinotoolkit.github.io) `__. 
diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index 1e5233ac064d0f..94c0332790663a 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -1,5 +1,5 @@ ============================ -OpenVINO 2024.5 +OpenVINO 2024.6 ============================ .. meta:: From 169da42e738ea30dd6a9c6f66588932f80eeede0 Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Thu, 19 Dec 2024 13:41:02 +0100 Subject: [PATCH 19/60] [DOCS] Port for Graph refactoring to master (#28147) Port for https://github.com/openvinotoolkit/openvino/pull/28146 --- .../benchmarks_files/data/graph-data-ov.json | 16 ++++++++-------- .../_static/benchmarks_files/graph-config.json | 1 + docs/sphinx_setup/_static/js/graphs.js | 14 ++++++++------ 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json index c5cfca9df3f095..7a2a9d68a3fefe 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json +++ b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json @@ -16046,7 +16046,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16081,7 +16081,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16116,7 +16116,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16151,7 +16151,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16186,7 +16186,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16256,7 +16256,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, 
"latency": { @@ -16326,7 +16326,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { @@ -16361,7 +16361,7 @@ "bf16": "" } ], - "Unit": "Tokens per Sec", + "Unit": "Tokens per sec", "UnitDesc": "higher is better" }, "latency": { diff --git a/docs/sphinx_setup/_static/benchmarks_files/graph-config.json b/docs/sphinx_setup/_static/benchmarks_files/graph-config.json index e090e5abe97474..78b24d446300ec 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/graph-config.json +++ b/docs/sphinx_setup/_static/benchmarks_files/graph-config.json @@ -25,6 +25,7 @@ "msec/token": "(lower is better)", "Generating time, sec.": "(lower is better)", "msec/token/TDP": "(lower is better)", + "Tokens per sec": "(higher is better)", "FPS": "(higher is better)", "FPS/$": "(higher is better)", "FPS/TDP": "(higher is better)", diff --git a/docs/sphinx_setup/_static/js/graphs.js b/docs/sphinx_setup/_static/js/graphs.js index 04e34d6c2fefe5..f860cffb6b88fd 100644 --- a/docs/sphinx_setup/_static/js/graphs.js +++ b/docs/sphinx_setup/_static/js/graphs.js @@ -661,15 +661,18 @@ $(document).ready(function () { var filteredNetworkModels = Filter.FilterByNetworkModel(graph, [networkModel]); var filteredIeTypes = Filter.ByIeTypes(filteredNetworkModels, ieTypes); var filteredGraphData = Filter.BySortPlatforms(filteredIeTypes, platforms); + var filterdPlatforms = platforms.filter(platform => + filteredGraphData.some(filteredGraph => platform === filteredGraph.Platform) + ); $('.chart-placeholder').append(chartContainer); if (filteredGraphData.length > 0) { if (isLLM === true) { - var graphConfigs = setGraphConfigsByEngines(filteredGraphData, appConfig, kpis, precisions) - createChartWithNewDataByEngines(platforms, graphConfigs, chartContainer, display); + var graphConfigs = setGraphConfigsByEngines(filteredGraphData, appConfig, kpis, precisions); + createChartWithNewDataByEngines(filterdPlatforms, graphConfigs, chartContainer, 
display); } else { - var graphConfigs = setGraphConfigs(filteredGraphData, appConfig, kpis, precisions) - createChartWithNewData(platforms, graphConfigs, appConfig, chartContainer, display); + var graphConfigs = setGraphConfigs(filteredGraphData, appConfig, kpis, precisions); + createChartWithNewData(filterdPlatforms, graphConfigs, appConfig, chartContainer, display); } } else { @@ -759,8 +762,7 @@ $(document).ready(function () { columnHeaderContainer.append(columnIcon); var columnHeader = $('
    '); columnHeader.append($('
    ' + graphConfig.chartTitle + '
    ')); - columnHeader.append($('
    ' + graphConfig.unit + '
    ')); - columnHeader.append($('
    ' + appConfig.UnitDescription[graphConfig.unit] + '
    ')); + columnHeader.append($('
    ' + graphConfig.unit + ' ' + appConfig.UnitDescription[graphConfig.unit] +'
    ')); columnHeaderContainer.append(columnHeader); chartGraphsContainer.append(graphItem); var graphClass = $('
    '); From d0a8a3914860d0c34dac285fd83cca15efeb031c Mon Sep 17 00:00:00 2001 From: Alicja Miloszewska Date: Thu, 19 Dec 2024 13:53:59 +0100 Subject: [PATCH 20/60] [Py OV] Alias classes and functions in flatten openvino namespace (#28085) ### Details: - Add imports from _pyopenivno on the top of the openvino `__init__.py` - Move `Node` binary operators support at the bottom of openvino `__init__.py` Classes and functions available in `openvino/runtime` will match those in flatten `openvino` ### Tickets: - [CVS-129453](https://jira.devtools.intel.com/browse/CVS-129453) --------- Signed-off-by: Alicja Miloszewska Co-authored-by: Michal Lukaszewski --- src/bindings/python/src/openvino/__init__.py | 71 ++++++----- src/bindings/python/src/openvino/_ov_api.py | 4 +- tools/benchmark_tool/openvino/__init__.py | 71 ++++++----- tools/mo/openvino/__init__.py | 123 ++++++++++++------- tools/openvino_dev/src/openvino/__init__.py | 123 ++++++++++++------- tools/ovc/openvino/__init__.py | 71 ++++++----- 6 files changed, 288 insertions(+), 175 deletions(-) diff --git a/src/bindings/python/src/openvino/__init__.py b/src/bindings/python/src/openvino/__init__.py index e3e878564f3313..7643f742e0067d 100644 --- a/src/bindings/python/src/openvino/__init__.py +++ b/src/bindings/python/src/openvino/__init__.py @@ -17,6 +17,47 @@ # # This __init__.py forces checking of runtime modules to propagate errors. # # It is not compared with init files from openvino-dev package. 
# # + +# Openvino pybind bindings +from openvino._pyopenvino import AxisSet +from openvino._pyopenvino import AxisVector +from openvino._pyopenvino import ConstOutput +from openvino._pyopenvino import Coordinate +from openvino._pyopenvino import CoordinateDiff +from openvino._pyopenvino import DiscreteTypeInfo +from openvino._pyopenvino import Extension +from openvino._pyopenvino import ProfilingInfo +from openvino._pyopenvino import RTMap +from openvino._pyopenvino import Version +from openvino._pyopenvino import Symbol +from openvino._pyopenvino import Dimension +from openvino._pyopenvino import Input +from openvino._pyopenvino import Output +from openvino._pyopenvino import Node +from openvino._pyopenvino import Strides +from openvino._pyopenvino import PartialShape +from openvino._pyopenvino import Shape +from openvino._pyopenvino import Layout +from openvino._pyopenvino import Type +from openvino._pyopenvino import Tensor +from openvino._pyopenvino import OVAny +from openvino._pyopenvino import get_batch +from openvino._pyopenvino import set_batch +from openvino._pyopenvino import serialize +from openvino._pyopenvino import shutdown +from openvino._pyopenvino import save_model +from openvino._pyopenvino import layout_helpers +from openvino._pyopenvino import RemoteContext +from openvino._pyopenvino import RemoteTensor +from openvino._pyopenvino import Op + +# Import public classes from _ov_api +from openvino._ov_api import Model +from openvino._ov_api import Core +from openvino._ov_api import CompiledModel +from openvino._ov_api import InferRequest +from openvino._ov_api import AsyncInferQueue + # Import all public modules from openvino import runtime as runtime from openvino import frontend as frontend @@ -26,36 +67,10 @@ from openvino import utils as utils from openvino import properties as properties -# Import most important classes and functions from openvino.runtime -from openvino._ov_api import Model -from openvino._ov_api import Core -from 
openvino._ov_api import CompiledModel -from openvino._ov_api import InferRequest -from openvino._ov_api import AsyncInferQueue - -from openvino.runtime import Symbol -from openvino.runtime import Dimension -from openvino.runtime import Strides -from openvino.runtime import PartialShape -from openvino.runtime import Shape -from openvino.runtime import Layout -from openvino.runtime import Type -from openvino.runtime import Tensor -from openvino.runtime import OVAny - # Helper functions for openvino module -from openvino.runtime.utils.data_helpers import tensor_from_file +from openvino.utils.data_helpers import tensor_from_file from openvino._ov_api import compile_model -from openvino.runtime import get_batch -from openvino.runtime import set_batch -from openvino.runtime import serialize -from openvino.runtime import shutdown -from openvino.runtime import save_model -from openvino.runtime import layout_helpers -from openvino._pyopenvino import RemoteContext -from openvino._pyopenvino import RemoteTensor -from openvino._pyopenvino import Op # Import opsets from openvino import opset1 @@ -80,7 +95,7 @@ from openvino._pyopenvino import VASurfaceTensor # Set version for openvino package -from openvino.runtime import get_version +from openvino._pyopenvino import get_version __version__ = get_version() # Tools diff --git a/src/bindings/python/src/openvino/_ov_api.py b/src/bindings/python/src/openvino/_ov_api.py index 1631bc42051418..972ab4a9eb81c0 100644 --- a/src/bindings/python/src/openvino/_ov_api.py +++ b/src/bindings/python/src/openvino/_ov_api.py @@ -4,9 +4,7 @@ from typing import Any, Iterable, Union, Optional, Dict from pathlib import Path -import warnings -import numpy as np from openvino._pyopenvino import Model as ModelBase from openvino._pyopenvino import Core as CoreBase @@ -15,7 +13,7 @@ from openvino._pyopenvino import Tensor from openvino._pyopenvino import Node -from openvino.runtime.utils.data_helpers import ( +from openvino.utils.data_helpers import ( 
OVDict, _InferRequestWrapper, _data_dispatch, diff --git a/tools/benchmark_tool/openvino/__init__.py b/tools/benchmark_tool/openvino/__init__.py index e3e878564f3313..7643f742e0067d 100644 --- a/tools/benchmark_tool/openvino/__init__.py +++ b/tools/benchmark_tool/openvino/__init__.py @@ -17,6 +17,47 @@ # # This __init__.py forces checking of runtime modules to propagate errors. # # It is not compared with init files from openvino-dev package. # # + +# Openvino pybind bindings +from openvino._pyopenvino import AxisSet +from openvino._pyopenvino import AxisVector +from openvino._pyopenvino import ConstOutput +from openvino._pyopenvino import Coordinate +from openvino._pyopenvino import CoordinateDiff +from openvino._pyopenvino import DiscreteTypeInfo +from openvino._pyopenvino import Extension +from openvino._pyopenvino import ProfilingInfo +from openvino._pyopenvino import RTMap +from openvino._pyopenvino import Version +from openvino._pyopenvino import Symbol +from openvino._pyopenvino import Dimension +from openvino._pyopenvino import Input +from openvino._pyopenvino import Output +from openvino._pyopenvino import Node +from openvino._pyopenvino import Strides +from openvino._pyopenvino import PartialShape +from openvino._pyopenvino import Shape +from openvino._pyopenvino import Layout +from openvino._pyopenvino import Type +from openvino._pyopenvino import Tensor +from openvino._pyopenvino import OVAny +from openvino._pyopenvino import get_batch +from openvino._pyopenvino import set_batch +from openvino._pyopenvino import serialize +from openvino._pyopenvino import shutdown +from openvino._pyopenvino import save_model +from openvino._pyopenvino import layout_helpers +from openvino._pyopenvino import RemoteContext +from openvino._pyopenvino import RemoteTensor +from openvino._pyopenvino import Op + +# Import public classes from _ov_api +from openvino._ov_api import Model +from openvino._ov_api import Core +from openvino._ov_api import CompiledModel +from 
openvino._ov_api import InferRequest +from openvino._ov_api import AsyncInferQueue + # Import all public modules from openvino import runtime as runtime from openvino import frontend as frontend @@ -26,36 +67,10 @@ from openvino import utils as utils from openvino import properties as properties -# Import most important classes and functions from openvino.runtime -from openvino._ov_api import Model -from openvino._ov_api import Core -from openvino._ov_api import CompiledModel -from openvino._ov_api import InferRequest -from openvino._ov_api import AsyncInferQueue - -from openvino.runtime import Symbol -from openvino.runtime import Dimension -from openvino.runtime import Strides -from openvino.runtime import PartialShape -from openvino.runtime import Shape -from openvino.runtime import Layout -from openvino.runtime import Type -from openvino.runtime import Tensor -from openvino.runtime import OVAny - # Helper functions for openvino module -from openvino.runtime.utils.data_helpers import tensor_from_file +from openvino.utils.data_helpers import tensor_from_file from openvino._ov_api import compile_model -from openvino.runtime import get_batch -from openvino.runtime import set_batch -from openvino.runtime import serialize -from openvino.runtime import shutdown -from openvino.runtime import save_model -from openvino.runtime import layout_helpers -from openvino._pyopenvino import RemoteContext -from openvino._pyopenvino import RemoteTensor -from openvino._pyopenvino import Op # Import opsets from openvino import opset1 @@ -80,7 +95,7 @@ from openvino._pyopenvino import VASurfaceTensor # Set version for openvino package -from openvino.runtime import get_version +from openvino._pyopenvino import get_version __version__ = get_version() # Tools diff --git a/tools/mo/openvino/__init__.py b/tools/mo/openvino/__init__.py index 46e35babdc9fad..7643f742e0067d 100644 --- a/tools/mo/openvino/__init__.py +++ b/tools/mo/openvino/__init__.py @@ -12,56 +12,91 @@ except ImportError: 
pass -# OpenVINO API -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino import properties as properties +# # +# # OpenVINO API +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue +# Openvino pybind bindings +from openvino._pyopenvino import AxisSet +from openvino._pyopenvino import AxisVector +from openvino._pyopenvino import ConstOutput +from openvino._pyopenvino import Coordinate +from openvino._pyopenvino import CoordinateDiff +from openvino._pyopenvino import DiscreteTypeInfo +from openvino._pyopenvino import Extension +from openvino._pyopenvino import ProfilingInfo +from openvino._pyopenvino import RTMap +from openvino._pyopenvino import Version +from openvino._pyopenvino import Symbol +from openvino._pyopenvino import Dimension +from openvino._pyopenvino import Input +from openvino._pyopenvino import Output +from openvino._pyopenvino import Node +from openvino._pyopenvino import Strides +from openvino._pyopenvino import PartialShape +from openvino._pyopenvino import Shape +from openvino._pyopenvino import Layout +from openvino._pyopenvino import Type +from openvino._pyopenvino import Tensor +from openvino._pyopenvino import OVAny +from openvino._pyopenvino import get_batch +from openvino._pyopenvino import set_batch +from openvino._pyopenvino import serialize +from openvino._pyopenvino import shutdown +from openvino._pyopenvino import save_model +from openvino._pyopenvino import 
layout_helpers +from openvino._pyopenvino import RemoteContext +from openvino._pyopenvino import RemoteTensor +from openvino._pyopenvino import Op - from openvino.runtime import Symbol - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny +# Import public classes from _ov_api +from openvino._ov_api import Model +from openvino._ov_api import Core +from openvino._ov_api import CompiledModel +from openvino._ov_api import InferRequest +from openvino._ov_api import AsyncInferQueue - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import experimental as experimental +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino import properties as properties - from openvino._pyopenvino import RemoteContext - from openvino._pyopenvino import RemoteTensor - from openvino._pyopenvino import Op +# Helper functions for openvino module +from openvino.utils.data_helpers import tensor_from_file +from openvino._ov_api import compile_model - # libva related: - from openvino._pyopenvino import VAContext - from openvino._pyopenvino import VASurfaceTensor - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has 
problems with imports!", ImportWarning, stacklevel=2) +# Import opsets +from openvino import opset1 +from openvino import opset2 +from openvino import opset3 +from openvino import opset4 +from openvino import opset5 +from openvino import opset6 +from openvino import opset7 +from openvino import opset8 +from openvino import opset9 +from openvino import opset10 +from openvino import opset11 +from openvino import opset12 +from openvino import opset13 +from openvino import opset14 +from openvino import opset15 +from openvino import opset16 + +# libva related: +from openvino._pyopenvino import VAContext +from openvino._pyopenvino import VASurfaceTensor + +# Set version for openvino package +from openvino._pyopenvino import get_version +__version__ = get_version() # Tools try: diff --git a/tools/openvino_dev/src/openvino/__init__.py b/tools/openvino_dev/src/openvino/__init__.py index 46e35babdc9fad..7643f742e0067d 100644 --- a/tools/openvino_dev/src/openvino/__init__.py +++ b/tools/openvino_dev/src/openvino/__init__.py @@ -12,56 +12,91 @@ except ImportError: pass -# OpenVINO API -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino import properties as properties +# # +# # OpenVINO API +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. 
+# # - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue +# Openvino pybind bindings +from openvino._pyopenvino import AxisSet +from openvino._pyopenvino import AxisVector +from openvino._pyopenvino import ConstOutput +from openvino._pyopenvino import Coordinate +from openvino._pyopenvino import CoordinateDiff +from openvino._pyopenvino import DiscreteTypeInfo +from openvino._pyopenvino import Extension +from openvino._pyopenvino import ProfilingInfo +from openvino._pyopenvino import RTMap +from openvino._pyopenvino import Version +from openvino._pyopenvino import Symbol +from openvino._pyopenvino import Dimension +from openvino._pyopenvino import Input +from openvino._pyopenvino import Output +from openvino._pyopenvino import Node +from openvino._pyopenvino import Strides +from openvino._pyopenvino import PartialShape +from openvino._pyopenvino import Shape +from openvino._pyopenvino import Layout +from openvino._pyopenvino import Type +from openvino._pyopenvino import Tensor +from openvino._pyopenvino import OVAny +from openvino._pyopenvino import get_batch +from openvino._pyopenvino import set_batch +from openvino._pyopenvino import serialize +from openvino._pyopenvino import shutdown +from openvino._pyopenvino import save_model +from openvino._pyopenvino import layout_helpers +from openvino._pyopenvino import RemoteContext +from openvino._pyopenvino import RemoteTensor +from openvino._pyopenvino import Op - from openvino.runtime import Symbol - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime 
import OVAny +# Import public classes from _ov_api +from openvino._ov_api import Model +from openvino._ov_api import Core +from openvino._ov_api import CompiledModel +from openvino._ov_api import InferRequest +from openvino._ov_api import AsyncInferQueue - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import experimental as experimental +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino import properties as properties - from openvino._pyopenvino import RemoteContext - from openvino._pyopenvino import RemoteTensor - from openvino._pyopenvino import Op +# Helper functions for openvino module +from openvino.utils.data_helpers import tensor_from_file +from openvino._ov_api import compile_model - # libva related: - from openvino._pyopenvino import VAContext - from openvino._pyopenvino import VASurfaceTensor - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# Import opsets +from openvino import opset1 +from openvino import opset2 +from openvino import opset3 +from openvino import opset4 +from openvino import opset5 +from openvino import opset6 +from openvino import opset7 +from openvino import opset8 +from openvino import opset9 +from openvino import opset10 +from openvino import opset11 +from openvino import opset12 +from openvino import opset13 +from openvino import opset14 
+from openvino import opset15 +from openvino import opset16 + +# libva related: +from openvino._pyopenvino import VAContext +from openvino._pyopenvino import VASurfaceTensor + +# Set version for openvino package +from openvino._pyopenvino import get_version +__version__ = get_version() # Tools try: diff --git a/tools/ovc/openvino/__init__.py b/tools/ovc/openvino/__init__.py index e3e878564f3313..7643f742e0067d 100644 --- a/tools/ovc/openvino/__init__.py +++ b/tools/ovc/openvino/__init__.py @@ -17,6 +17,47 @@ # # This __init__.py forces checking of runtime modules to propagate errors. # # It is not compared with init files from openvino-dev package. # # + +# Openvino pybind bindings +from openvino._pyopenvino import AxisSet +from openvino._pyopenvino import AxisVector +from openvino._pyopenvino import ConstOutput +from openvino._pyopenvino import Coordinate +from openvino._pyopenvino import CoordinateDiff +from openvino._pyopenvino import DiscreteTypeInfo +from openvino._pyopenvino import Extension +from openvino._pyopenvino import ProfilingInfo +from openvino._pyopenvino import RTMap +from openvino._pyopenvino import Version +from openvino._pyopenvino import Symbol +from openvino._pyopenvino import Dimension +from openvino._pyopenvino import Input +from openvino._pyopenvino import Output +from openvino._pyopenvino import Node +from openvino._pyopenvino import Strides +from openvino._pyopenvino import PartialShape +from openvino._pyopenvino import Shape +from openvino._pyopenvino import Layout +from openvino._pyopenvino import Type +from openvino._pyopenvino import Tensor +from openvino._pyopenvino import OVAny +from openvino._pyopenvino import get_batch +from openvino._pyopenvino import set_batch +from openvino._pyopenvino import serialize +from openvino._pyopenvino import shutdown +from openvino._pyopenvino import save_model +from openvino._pyopenvino import layout_helpers +from openvino._pyopenvino import RemoteContext +from openvino._pyopenvino import 
RemoteTensor +from openvino._pyopenvino import Op + +# Import public classes from _ov_api +from openvino._ov_api import Model +from openvino._ov_api import Core +from openvino._ov_api import CompiledModel +from openvino._ov_api import InferRequest +from openvino._ov_api import AsyncInferQueue + # Import all public modules from openvino import runtime as runtime from openvino import frontend as frontend @@ -26,36 +67,10 @@ from openvino import utils as utils from openvino import properties as properties -# Import most important classes and functions from openvino.runtime -from openvino._ov_api import Model -from openvino._ov_api import Core -from openvino._ov_api import CompiledModel -from openvino._ov_api import InferRequest -from openvino._ov_api import AsyncInferQueue - -from openvino.runtime import Symbol -from openvino.runtime import Dimension -from openvino.runtime import Strides -from openvino.runtime import PartialShape -from openvino.runtime import Shape -from openvino.runtime import Layout -from openvino.runtime import Type -from openvino.runtime import Tensor -from openvino.runtime import OVAny - # Helper functions for openvino module -from openvino.runtime.utils.data_helpers import tensor_from_file +from openvino.utils.data_helpers import tensor_from_file from openvino._ov_api import compile_model -from openvino.runtime import get_batch -from openvino.runtime import set_batch -from openvino.runtime import serialize -from openvino.runtime import shutdown -from openvino.runtime import save_model -from openvino.runtime import layout_helpers -from openvino._pyopenvino import RemoteContext -from openvino._pyopenvino import RemoteTensor -from openvino._pyopenvino import Op # Import opsets from openvino import opset1 @@ -80,7 +95,7 @@ from openvino._pyopenvino import VASurfaceTensor # Set version for openvino package -from openvino.runtime import get_version +from openvino._pyopenvino import get_version __version__ = get_version() # Tools From 
60d72643946b11500ccd2ee4930877a2c6e2dbf8 Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Thu, 19 Dec 2024 14:55:33 +0200 Subject: [PATCH 21/60] [NPU] Fail for non continuous tensors (#28132) ### Details: - *Fail if non continuous tensor is set* ### Tickets: --------- Signed-off-by: Bogdan Pereanu --- .../common/include/intel_npu/common/igraph.hpp | 2 +- src/plugins/intel_npu/src/common/src/igraph.cpp | 2 +- .../src/common/src/sync_infer_request.cpp | 2 ++ .../functional/behavior/infer_request_run.cpp | 6 ++++++ .../functional/behavior/infer_request_run.hpp | 16 ++++++++++++++++ 5 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_npu/src/common/include/intel_npu/common/igraph.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/igraph.hpp index 7e718d9172f4f7..a6ab01a4de4030 100644 --- a/src/plugins/intel_npu/src/common/include/intel_npu/common/igraph.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/igraph.hpp @@ -52,7 +52,7 @@ class IGraph : public std::enable_shared_from_this { uint32_t get_unique_id(); void set_last_submitted_id(uint32_t id_index); - const uint32_t get_last_submitted_id() const; + uint32_t get_last_submitted_id() const; const std::optional get_batch_size() const; diff --git a/src/plugins/intel_npu/src/common/src/igraph.cpp b/src/plugins/intel_npu/src/common/src/igraph.cpp index fd5463af5eea3e..306080acfd70a1 100644 --- a/src/plugins/intel_npu/src/common/src/igraph.cpp +++ b/src/plugins/intel_npu/src/common/src/igraph.cpp @@ -90,7 +90,7 @@ void IGraph::set_last_submitted_id(uint32_t id_index) { _last_submitted_id = id_index; } -const uint32_t IGraph::get_last_submitted_id() const { +uint32_t IGraph::get_last_submitted_id() const { return _last_submitted_id; } diff --git a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp index 0eeefccf43906d..1379112b3a7852 100644 --- 
a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp +++ b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp @@ -177,6 +177,8 @@ void SyncInferRequest::check_tensor(const ov::Output& port, bool is_input = ov::op::util::is_parameter(port.get_node()); std::string tensor_type = is_input ? "input" : "output"; + OPENVINO_ASSERT(tensor->is_continuous(), "The tensor is not continuous"); + OPENVINO_ASSERT(port.get_element_type() == tensor->get_element_type(), "The tensor element type is not corresponding with output element type (", tensor->get_element_type(), diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp index e4a49ce9b7ccdb..d14f188a18cb4c 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp +++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp @@ -41,3 +41,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_NPU), ::testing::ValuesIn(batchingConfigs)), InferRequestRunTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(compatibility_smoke_BehaviorTest, + ROITensorInference, + ::testing::Combine(::testing::Values(tensor_roi::roi_nchw()), + ::testing::Values(ov::test::utils::DEVICE_NPU)), + ov::test::utils::appendPlatformTypeTestName); diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp index 07466677b9d547..9b63b96ba4e0bc 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp @@ -13,6 +13,7 @@ #include #include "base/ov_behavior_test_utils.hpp" +#include "behavior/ov_infer_request/inference.hpp" #include "common/npu_test_env_cfg.hpp" #include "common/utils.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" @@ -766,6 +767,21 @@ 
TEST_P(BatchingRunSeqTests, CheckMultipleBatchingRunsSeq) { } } +using ROITensorInference = OVInferRequestInferenceTests; + +TEST_P(ROITensorInference, InferenceROITensor) { + auto model = OVInferRequestInferenceTests::create_n_inputs(1, ov::element::f32, m_param.m_shape); + auto compiled_model = ie->compile_model(model, target_device); + // Create InferRequest + ov::InferRequest req; + req = compiled_model.create_infer_request(); + const std::string tensor_name = "tensor_input0"; + + OV_EXPECT_THROW_HAS_SUBSTRING(req.set_tensor(tensor_name, m_param.m_input_tensor), + ov::Exception, + "The tensor is not continuous"); +} + } // namespace behavior } // namespace test } // namespace ov From 6acc929eedf71dfadd78164b4c2ba389362d24cd Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Thu, 19 Dec 2024 14:01:10 +0100 Subject: [PATCH 22/60] [TRANSFORMATIONS] Derive 'scale' from hidden_dim directly in SDPAToPA (#28091) Currently 'scale' is obtained using a ShapeOf expression as the hidden_dim may be dynamic in some cases and not propagated, so we can't use it directly to create a 'scale' Constant. Check if hidden_dim is static and use it to calculate 'scale' directly omitting the ShapeOf expression. 
Ticket: * [CVS-158394](https://jira.devtools.intel.com/browse/CVS-158394) Signed-off-by: Andrii Staikov --- .../state_management_pattern.cpp | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/src/common/transformations/src/transformations/sdpa_to_paged_attention/state_management_pattern.cpp b/src/common/transformations/src/transformations/sdpa_to_paged_attention/state_management_pattern.cpp index 28e7cd90019b34..b55c3d73316120 100644 --- a/src/common/transformations/src/transformations/sdpa_to_paged_attention/state_management_pattern.cpp +++ b/src/common/transformations/src/transformations/sdpa_to_paged_attention/state_management_pattern.cpp @@ -310,20 +310,28 @@ ov::pass::StateManagementPattern::StateManagementPattern(ParameterVector& kv_par auto v_reshape = std::make_shared(v_target_layout, v0::Constant::create(element::i64, Shape{2}, {0, -1}), true); - auto hidden_shape = std::make_shared(real_q); - auto hidden_dim = std::make_shared(hidden_shape, - v0::Constant::create(element::i64, Shape{}, {-1}), - v0::Constant::create(element::i64, Shape{}, {0})); std::shared_ptr scale; if (pattern_map.count(scale_input)) { scale = pattern_map.at(scale_input).get_node_shared_ptr(); } else { - // most likely `scale` below will always be a constant in real inference, but dynamic dimension - // propagation may not always derive it as a constant. That's why a sub-graph computing `scale` is built - // instead of just a constant node representing one of the dimensions. 
- scale = std::make_shared( - v0::Constant::create(element::f32, Shape{}, {1}), - std::make_shared(std::make_shared(hidden_dim, element::f32))); + auto real_q_ps = real_q.get_partial_shape(); + + bool rank_is_static = real_q_ps.rank().is_static(); + if (rank_is_static && real_q_ps[real_q_ps.rank().get_length() - 1].is_static()) { + auto hidden_dim_len = static_cast(real_q_ps[real_q_ps.rank().get_length() - 1].get_length()); + scale = v0::Constant::create(element::f32, Shape{}, {1.0 / std::sqrt(hidden_dim_len)}); + } else { + // most likely `scale` below will always be a constant in real inference, but dynamic dimension + // propagation may not always derive it as a constant. That's why a sub-graph computing `scale` is built + // instead of just a constant node representing one of the dimensions. + auto hidden_shape = std::make_shared(real_q); + auto hidden_dim = std::make_shared(hidden_shape, + v0::Constant::create(element::i64, Shape{}, {-1}), + v0::Constant::create(element::i64, Shape{}, {0})); + scale = std::make_shared( + v0::Constant::create(element::f32, Shape{}, {1}), + std::make_shared(std::make_shared(hidden_dim, element::f32))); + } } std::shared_ptr alibi_slopes; From b982e19525eeecb0cf385591bd1ec182a781cb05 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Thu, 19 Dec 2024 14:19:32 +0100 Subject: [PATCH 23/60] [FE] Set OV model's output's name after parsing model by frontend (#28105) ### Details: - The model after read by IR frontend shows models output tensors names as Nodes connected to Result's. But by default this names are not dedicated Results name which can cause during pre-post processing that names stay on node and will disapear as model output names. To fix set the names as Results names so during transformations they will stay as model's output names. - Onnx frontens set OV model output's names when converting model to OV represenation. 
- Fix NPU test which reports `Attempt to get a name for a Tensor without names` ### Related PRs: - #28102 ### Tickets: - CVS-159401 --------- Signed-off-by: Raasz, Pawel --- src/frontends/ir/src/ir_deserializer.cpp | 13 +++ .../tests/pre_processing_deserialization.cpp | 99 +++++++++++++++++++ .../onnx/frontend/src/input_model.cpp | 7 ++ src/frontends/onnx/tests/load_from.cpp | 29 ++++++ 4 files changed, 148 insertions(+) diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index d7e250f9916302..62caebeee7d355 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/core/descriptor_tensor.hpp" #include "openvino/core/except.hpp" #include "openvino/core/meta_data.hpp" #include "openvino/core/rt_info/weightless_caching_attributes.hpp" @@ -18,6 +19,7 @@ #include "openvino/op/result.hpp" #include "openvino/op/util/assign_base.hpp" #include "openvino/op/util/framework_node.hpp" +#include "openvino/op/util/op_types.hpp" #include "openvino/op/util/read_value_base.hpp" #include "openvino/op/util/sub_graph_base.hpp" #include "openvino/op/util/variable.hpp" @@ -1023,6 +1025,17 @@ std::shared_ptr ov::XmlDeserializer::create_node(const std::vector(ovNode.get())) { + if (!ov::op::util::is_parameter(result->get_input_source_output(0).get_node())) { + // Copy names if parent node is not parameter, model's input names should not be dedicated + // output names as they could be removed from Parameter's tensor during model transformations. 
+ ov::descriptor::copy_tensor_names(result->get_output_tensor(0), result->get_input_tensor(0)); + } + } } return ovNode; diff --git a/src/frontends/ir/tests/pre_processing_deserialization.cpp b/src/frontends/ir/tests/pre_processing_deserialization.cpp index f97cf078b53951..460cff3d9dbfb5 100644 --- a/src/frontends/ir/tests/pre_processing_deserialization.cpp +++ b/src/frontends/ir/tests/pre_processing_deserialization.cpp @@ -1,8 +1,10 @@ // Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#include #include "frontend_test.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" class IRFrontendTestsPreProcessing : public ::testing::Test, public IRFrontendTestsImpl { protected: @@ -71,3 +73,100 @@ TEST_F(IRFrontendTestsPreProcessing, pre_processing) { OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } + +namespace ov { +namespace test { + +using testing::ElementsAre; +using testing::Property; +using testing::UnorderedElementsAre; + +TEST_F(IRFrontendTestsPreProcessing, check_tensor_names_after_read_and_pre_post_processing) { + std::string xml_model = R"V0G0N( + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +// )V0G0N"; + + constexpr auto DATA_COUNT = 1; + std::vector buffer(DATA_COUNT * sizeof(float), 0); + std::fill_n(reinterpret_cast(buffer.data()), DATA_COUNT, 1.f); + + createTemporalModelFile(xml_model, buffer); + + std::shared_ptr model; + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + ASSERT_NE(model, nullptr); + + EXPECT_THAT(model->inputs(), + ElementsAre(Property("Input 0", &Output::get_names, UnorderedElementsAre("input_a")), + Property("Input 1", &Output::get_names, UnorderedElementsAre("input_b")))); + + EXPECT_THAT(model->outputs(), + ElementsAre(Property("Output 0", &Output::get_names, UnorderedElementsAre("add_result")), + // Directly connected to model input shows input's names. 
+ Property("Output 1", &Output::get_names, UnorderedElementsAre("input_b")))); + + auto p = preprocess::PrePostProcessor(model); + p.output(0).tensor().set_element_type(element::f16); + p.output(1).tensor().set_element_type(element::i32); + model = p.build(); + + EXPECT_THAT(model->inputs(), + ElementsAre(Property("Input 0", &Output::get_names, UnorderedElementsAre("input_a")), + Property("Input 1", &Output::get_names, UnorderedElementsAre("input_b")))); + + EXPECT_THAT(model->outputs(), + ElementsAre(Property("Output 0", &Output::get_names, UnorderedElementsAre("add_result")), + // After PPP (inserts convert node) the tensor names stay on model's input. + Property("Output 1", &Output::get_names, testing::IsEmpty()))); +} +} // namespace test +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index 87f1439eb18b38..9410f54e428b3f 100644 --- a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -533,6 +533,13 @@ void InputModel::add_tensor_names(std::shared_ptr& model) { it->add_names(tensor_names.second); } } + + // Set model output names + for (auto&& result : model->get_results()) { + if (!is_type(result->get_input_source_output(0).get_node())) { + result->get_output_tensor(0).add_names(result->get_input_tensor(0).get_names()); + } + } } void InputModel::reshape_model_inputs(std::shared_ptr& model) { diff --git a/src/frontends/onnx/tests/load_from.cpp b/src/frontends/onnx/tests/load_from.cpp index 547937ac52171f..e74262991652d0 100644 --- a/src/frontends/onnx/tests/load_from.cpp +++ b/src/frontends/onnx/tests/load_from.cpp @@ -10,11 +10,15 @@ #include "common_test_utils/test_assertions.hpp" #include "onnx_utils.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" #include "utils.hpp" using namespace ov::frontend; using ONNXLoadTest = FrontEndLoadFromTest; +using testing::ElementsAre; +using testing::Property; +using 
testing::UnorderedElementsAre; static LoadFromFEParam getTestData() { LoadFromFEParam res; @@ -58,6 +62,31 @@ TEST_P(FrontEndLoadFromTest, load_model_not_exists_at_path) { OV_EXPECT_THROW(fe->load(model_file_path), ov::Exception, testing::HasSubstr(error_msg)); } +TEST_P(FrontEndLoadFromTest, load_model_and_apply_ppp) { + auto model_file_path = + ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, m_param.m_stream}); + + m_frontEnd = m_fem.load_by_model(model_file_path); + const auto fe_model = m_frontEnd->load(model_file_path); + auto model = m_frontEnd->convert(fe_model); + + EXPECT_THAT(model->inputs(), + ElementsAre(Property("Input 0", &ov::Output::get_names, UnorderedElementsAre("A")), + Property("Input 1", &ov::Output::get_names, UnorderedElementsAre("B")), + Property("Input 2", &ov::Output::get_names, UnorderedElementsAre("C")))); + EXPECT_THAT(model->output(0).get_names(), UnorderedElementsAre("Y")); + + auto p = ov::preprocess::PrePostProcessor(model); + p.output(0).tensor().set_element_type(ov::element::f16); + model = p.build(); + + EXPECT_THAT(model->inputs(), + ElementsAre(Property("Input 0", &ov::Output::get_names, UnorderedElementsAre("A")), + Property("Input 1", &ov::Output::get_names, UnorderedElementsAre("B")), + Property("Input 2", &ov::Output::get_names, UnorderedElementsAre("C")))); + EXPECT_THAT(model->output(0).get_names(), UnorderedElementsAre("Y")); +} + INSTANTIATE_TEST_SUITE_P(ONNXLoadTest, FrontEndLoadFromTest, ::testing::Values(getTestData()), From 2f4c730d59994b7a8303f2f9866b99b035e235fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Krzemi=C5=84ski?= Date: Thu, 19 Dec 2024 14:21:37 +0100 Subject: [PATCH 24/60] [CPU] RandomUniform seed alignment (#25098) ### Details: - Added variants of Philox algorithm generators from PyTorch and Tensorflow to align generated output for the same seed - Added x64 kernels for Philox (Tensorflow), Mersenne (PyTorch) - Added tests for exactness of results - Spec 
with changes description available here: https://github.com/openvinotoolkit/openvino/pull/26142 ### Tickets: - 134606 --------- Signed-off-by: PiotrKrzem Co-authored-by: Michal Lukaszewski --- .../src/nodes/kernels/x64/random_uniform.cpp | 895 ++++++++++++++++-- .../src/nodes/kernels/x64/random_uniform.hpp | 92 +- .../intel_cpu/src/nodes/random_uniform.cpp | 789 ++++++++++----- .../intel_cpu/src/nodes/random_uniform.hpp | 90 +- .../classes/random_uniform.cpp | 38 +- .../classes/random_uniform.hpp | 1 + .../instances/common/random_uniform.cpp | 7 + .../instances/x64/random_uniform.cpp | 5 +- .../single_layer_tests/random_uniform.cpp | 3 + .../single_op/random_uniform.hpp | 2 + .../src/single_op/random_uniform.cpp | 10 +- 11 files changed, 1584 insertions(+), 348 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp index aa5edf2cb69555..ba46f2052ca032 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp @@ -4,27 +4,56 @@ #include "random_uniform.hpp" +#include + using namespace dnnl::impl::cpu; namespace ov { namespace intel_cpu { namespace kernel { +namespace random_uniform { + +#define GET_PHILOX_OFFSET(field) offsetof(PhiloxGeneratorCallArgs, field) + +#define GET_MERSENNE_OFFSET(field) offsetof(MersenneTwisterGeneratorCallArgs, field) + +#define BROADCAST_CONSTANT(func, vector, aux_register, constant) \ + mov(aux_register, constant); \ + func(vector, aux_register); + +#define BROADCAST_PARAM(func, vector, aux_register, param_args_offset) \ + mov(aux_register, ptr[r64_params + param_args_offset]); \ + func(vector, ptr[aux_register]); + +#define INIT_ARR(A, V, R, T) \ + static const T A[8] = {V, V, V, V, V, V, V, V}; \ + if (isa == x64::avx2) { \ + mov(R, reinterpret_cast(A)); \ + } else { \ + static const T* A##_aligned = A + (reinterpret_cast(A) % 16) / sizeof(T); \ + 
mov(R, reinterpret_cast(A##_aligned)); \ + } -#define GET_OFF(field) offsetof(RandomUniformCallArgs, field) +union FloatAsBits { + float f; + uint32_t u; +}; + +////////////// PHILOX GENERATOR ///////////////////////// template -RandomUniform::RandomUniform(const RandomUniformCompileParams& jcp) : JitKernel(jit_name(), jcp, isa) {} +PhiloxGenerator::PhiloxGenerator(const PhiloxGeneratorCompileParams& jcp) : JitKernel(jit_name(), jcp, isa) {} template -void RandomUniform::generate() { +void PhiloxGenerator::generate() { this->preamble(); registersPool = RegistersPool::create(isa, {rax, rcx, rsp, rdi, k0}); r64_dst = getReg64(); r64_work_amount = getReg64(); - mov(r64_work_amount, ptr[r64_params + GET_OFF(work_amount)]); - mov(r64_dst, ptr[r64_params + GET_OFF(dst_ptr)]); + mov(r64_work_amount, ptr[r64_params + GET_PHILOX_OFFSET(work_amount)]); + mov(r64_dst, ptr[r64_params + GET_PHILOX_OFFSET(dst_ptr)]); initVectors(); process(); @@ -34,7 +63,7 @@ void RandomUniform::generate() { } template <> -void RandomUniform::initVectors() { +void PhiloxGenerator::initVectors() { const auto r64_aux = getReg64(); const auto r32_aux = Xbyak::Reg32(r64_aux.getIdx()); const auto r16_aux = Xbyak::Reg16(r64_aux.getIdx()); @@ -56,51 +85,44 @@ void RandomUniform::initVectors() { v_convert_1 = getVmm(); } - // Initialize constants. 
-#define BROADCAST_R(F, V, R, C) \ - mov(R, C); \ - F(V, R); -#define BROADCAST_P(F, V, R, C) \ - mov(R, ptr[r64_params + GET_OFF(C)]); \ - F(V, ptr[R]); - - BROADCAST_R(vpbroadcastq, v_max_mul_n_64, r64_aux, STATISTIC_MAXIMIZING_MULTIPLIER_N) - BROADCAST_R(vpbroadcastq, v_max_mul_c_64, r64_aux, STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER) - BROADCAST_R(vpbroadcastd, v_add_low_k, r32_aux, CRUSH_RESISTANCE_CONST_LOWER_VALUE) - BROADCAST_R(vpbroadcastd, v_add_up_k, r32_aux, CRUSH_RESISTANCE_CONST_UPPER_VALUE) - BROADCAST_R(vpbroadcastq, v_n_inc, r64_aux, 0x00000008) + // Initialize constants + BROADCAST_CONSTANT(vpbroadcastq, v_max_mul_n_64, r64_aux, STATISTIC_MAXIMIZING_MULTIPLIER_N) + BROADCAST_CONSTANT(vpbroadcastq, v_max_mul_c_64, r64_aux, STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER) + BROADCAST_CONSTANT(vpbroadcastd, v_add_low_k, r32_aux, CRUSH_RESISTANCE_CONST_LOWER_VALUE) + BROADCAST_CONSTANT(vpbroadcastd, v_add_up_k, r32_aux, CRUSH_RESISTANCE_CONST_UPPER_VALUE) + BROADCAST_CONSTANT(vpbroadcastq, v_n_inc, r64_aux, 0x00000008) if (m_jcp.out_data_type == element::f32) { - BROADCAST_R(vpbroadcastd, v_convert_0, r32_aux, 0x3f800000) - BROADCAST_R(vpbroadcastd, v_convert_1, r32_aux, 0x007fffff) - BROADCAST_P(vpbroadcastd, v_range, r64_aux, range_ptr) - BROADCAST_P(vpbroadcastd, v_min, r64_aux, min_ptr) + BROADCAST_CONSTANT(vpbroadcastd, v_convert_0, r32_aux, 0x3f800000) + BROADCAST_CONSTANT(vpbroadcastd, v_convert_1, r32_aux, 0x007fffff) + BROADCAST_PARAM(vpbroadcastd, v_range, r64_aux, GET_PHILOX_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastd, v_min, r64_aux, GET_PHILOX_OFFSET(min_ptr)) } else if (m_jcp.out_data_type == element::f16 && x64::mayiuse(x64::avx512_core_fp16)) { - BROADCAST_R(vpbroadcastw, v_convert_0, r16_aux, 0x3c00) - BROADCAST_R(vpbroadcastw, v_convert_1, r16_aux, 0x03ff) - BROADCAST_P(vpbroadcastw, v_range, r64_aux, range_ptr) - BROADCAST_P(vpbroadcastw, v_min, r64_aux, min_ptr) + BROADCAST_CONSTANT(vpbroadcastw, v_convert_0, r16_aux, 0x3c00) + 
BROADCAST_CONSTANT(vpbroadcastw, v_convert_1, r16_aux, 0x03ff) + BROADCAST_PARAM(vpbroadcastw, v_range, r64_aux, GET_PHILOX_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastw, v_min, r64_aux, GET_PHILOX_OFFSET(min_ptr)) } else if (m_jcp.out_data_type == element::bf16 && x64::mayiuse(x64::avx512_core_bf16)) { v_convert_2 = getVmm(); const auto ymm_min = Xbyak::Ymm(v_min.getIdx()); const auto ymm_range = Xbyak::Ymm(v_range.getIdx()); - BROADCAST_R(vpbroadcastw, v_convert_0, r16_aux, 0x3f80) - BROADCAST_R(vpbroadcastw, v_convert_1, r16_aux, 0x007f) - BROADCAST_R(vpbroadcastd, v_convert_2, r32_aux, 0x3f800000) + BROADCAST_CONSTANT(vpbroadcastw, v_convert_0, r16_aux, 0x3f80) + BROADCAST_CONSTANT(vpbroadcastw, v_convert_1, r16_aux, 0x007f) + BROADCAST_CONSTANT(vpbroadcastd, v_convert_2, r32_aux, 0x3f800000) - BROADCAST_P(vpbroadcastw, v_range, r64_aux, range_ptr) + BROADCAST_PARAM(vpbroadcastw, v_range, r64_aux, GET_PHILOX_OFFSET(range_ptr)) vpmovzxwd(v_range, ymm_range); uni_vpslld(v_range, v_range, 16); - BROADCAST_P(vpbroadcastw, v_min, r64_aux, min_ptr) + BROADCAST_PARAM(vpbroadcastw, v_min, r64_aux, GET_PHILOX_OFFSET(min_ptr)) vpmovzxwd(v_min, ymm_min); uni_vpslld(v_min, v_min, 16); } else if (m_jcp.out_data_type == element::i32) { const auto ymm_range = Xbyak::Ymm(v_range.getIdx()); - BROADCAST_P(vpbroadcastd, v_range, r64_aux, range_ptr) - BROADCAST_P(vpbroadcastd, v_min, r64_aux, min_ptr) + BROADCAST_PARAM(vpbroadcastd, v_range, r64_aux, GET_PHILOX_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastd, v_min, r64_aux, GET_PHILOX_OFFSET(min_ptr)) uni_vcvtdq2pd(v_range, ymm_range); } else { @@ -111,9 +133,9 @@ void RandomUniform::initVectors() { } // Initialize inputs. 
- BROADCAST_P(vpbroadcastq, v_key_64, r64_aux, key_ptr) - BROADCAST_P(vpbroadcastq, v_counter_64, r64_aux, counter_ptr) - BROADCAST_P(vpbroadcastq, v_n_64, r64_aux, n_ptr) + BROADCAST_PARAM(vpbroadcastq, v_key_64, r64_aux, GET_PHILOX_OFFSET(key_ptr)) + BROADCAST_PARAM(vpbroadcastq, v_counter_64, r64_aux, GET_PHILOX_OFFSET(counter_ptr)) + BROADCAST_PARAM(vpbroadcastq, v_n_64, r64_aux, GET_PHILOX_OFFSET(n_ptr)) if (m_jcp.out_data_type.size() <= 4) { static const uint64_t n_inc_arr[8] = {0, 1, 2, 3, 4, 5, 6, 7}; @@ -154,13 +176,10 @@ void RandomUniform::initVectors() { mov(r64_aux, reinterpret_cast(perm_16)); uni_vmovups(v_perm_16, ptr[r64_aux]); } - -#undef BROADCAST_R -#undef BROADCAST_P } template // Works for AVX2, SSE41 -void RandomUniform::initVectors() { +void PhiloxGenerator::initVectors() { const auto r64_aux = getReg64(); v_max_mul_n_64 = getVmm(); @@ -175,15 +194,6 @@ void RandomUniform::initVectors() { r64_n_inc = getReg64(); r64_min = getReg64(); -#define INIT_ARR(A, V, R, T) \ - static const T A[8] = {V, V, V, V, V, V, V, V}; \ - if (isa == x64::avx2) { \ - mov(R, reinterpret_cast(A)); \ - } else { \ - static const T* A##_aligned = A + (reinterpret_cast(A) % 16) / sizeof(T); \ - mov(R, reinterpret_cast(A##_aligned)); \ - } - // Initialize constants. 
INIT_ARR(max_mul_n_64, STATISTIC_MAXIMIZING_MULTIPLIER_N, r64_aux, uint64_t); uni_vmovups(v_max_mul_n_64, ptr[r64_aux]); @@ -206,11 +216,11 @@ void RandomUniform::initVectors() { INIT_ARR(convert_0, 0x3f800000, r64_convert_0, uint32_t); INIT_ARR(convert_1, 0x007fffff, r64_convert_1, uint32_t); - mov(r64_aux, ptr[r64_params + GET_OFF(range_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(range_ptr)]); uni_vpbroadcastd(v_range, ptr[r64_aux]); auto v_aux = getVmm(); - mov(r64_aux, ptr[r64_params + GET_OFF(min_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(min_ptr)]); uni_vpbroadcastd(v_aux, ptr[r64_aux]); static uint32_t min_arr[8]; mov(r64_min, reinterpret_cast(min_arr)); @@ -222,10 +232,10 @@ void RandomUniform::initVectors() { INIT_ARR(f64_pow_52, 0x4330000000000000, r64_f64_pow_52, uint64_t); - mov(r64_aux, ptr[r64_params + GET_OFF(range_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(range_ptr)]); uni_vpbroadcastd(v_range, ptr[r64_aux]); - mov(r64_aux, ptr[r64_params + GET_OFF(min_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(min_ptr)]); uni_vpbroadcastd(v_aux, ptr[r64_aux]); static uint32_t min_arr[8]; mov(r64_min, reinterpret_cast(min_arr)); @@ -240,13 +250,13 @@ void RandomUniform::initVectors() { } // Initialize inputs. 
- mov(r64_aux, ptr[r64_params + GET_OFF(key_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(key_ptr)]); uni_vpbroadcastq(v_key_64, ptr[r64_aux]); - mov(r64_aux, ptr[r64_params + GET_OFF(counter_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(counter_ptr)]); uni_vpbroadcastq(v_counter_64, ptr[r64_aux]); - mov(r64_aux, ptr[r64_params + GET_OFF(n_ptr)]); + mov(r64_aux, ptr[r64_params + GET_PHILOX_OFFSET(n_ptr)]); uni_vpbroadcastq(v_n_64, ptr[r64_aux]); if (m_jcp.out_data_type.size() <= 4) { @@ -267,12 +277,10 @@ void RandomUniform::initVectors() { } uni_vpaddq(v_n_64, v_n_64, ptr[r64_aux]); - -#undef INIT_ARR } template -void RandomUniform::process() { +void PhiloxGenerator::process() { auto v_dst_0 = getVmm(); auto v_dst_1 = getVmm(); std::vector v_res{v_dst_0, v_dst_1}; @@ -315,14 +323,14 @@ void RandomUniform::process() { } template -void RandomUniform::calculateRound(const Vmm& vmm_k_0, - const Vmm& vmm_k_1, - const Vmm& vmm_c_0, - const Vmm& vmm_c_1, - const Vmm& vmm_n_0, - const Vmm& vmm_n_1, - const Vmm& vmm_aux_0, - const Vmm& vmm_aux_1) { +void PhiloxGenerator::calculateRound(const Vmm& vmm_k_0, + const Vmm& vmm_k_1, + const Vmm& vmm_c_0, + const Vmm& vmm_c_1, + const Vmm& vmm_n_0, + const Vmm& vmm_n_1, + const Vmm& vmm_aux_0, + const Vmm& vmm_aux_1) { uni_vpmuludq(vmm_aux_0, vmm_n_0, v_max_mul_n_64); // {p0,p1,p0,p1} = {n0,_,n0,_} * {m0,_,m0,_} uni_vpmuludq(vmm_aux_1, vmm_c_0, v_max_mul_c_64); // {r0,r1,r0,r1} = {c0,_,c0,_} * {m0,_,m0,_} @@ -336,10 +344,10 @@ void RandomUniform::calculateRound(const Vmm& vmm_k_0, } template -void RandomUniform::runPhilox(const std::vector& vmm_dst, - const Vmm& vmm_key, - const Vmm& vmm_counter, - const Vmm& vmm_n) { +void PhiloxGenerator::runPhilox(const std::vector& vmm_dst, + const Vmm& vmm_key, + const Vmm& vmm_counter, + const Vmm& vmm_n) { auto vmm_k_0 = getVmm(); auto vmm_k_1 = getVmm(); auto vmm_n_0 = getVmm(); @@ -398,13 +406,13 @@ void RandomUniform::runPhilox(const std::vector& vmm_dst, } 
template -void RandomUniform::raiseKey(const Vmm& vmm_k_0, const Vmm& vmm_k_1) { +void PhiloxGenerator::raiseKey(const Vmm& vmm_k_0, const Vmm& vmm_k_1) { uni_vpaddd(vmm_k_0, vmm_k_0, v_add_low_k); // {k0,_,k0,_} + {l0,_,l0,_} uni_vpaddd(vmm_k_1, vmm_k_1, v_add_up_k); // {k1,_,k1,_} + {u0,_,u0,_} } template <> -void RandomUniform::convert(const std::vector& v_dst, const std::vector& v_src) { +void PhiloxGenerator::convert(const std::vector& v_dst, const std::vector& v_src) { if (m_jcp.out_data_type.size() == 4) { for (size_t i = 0lu; i < v_src.size(); i++) { const auto& vmm_src = v_src[i]; @@ -495,7 +503,7 @@ void RandomUniform::convert(const std::vector& v_dst, con } template // Works for AVX2, SSE41 -void RandomUniform::convert(const std::vector& v_dst, const std::vector& v_src) { +void PhiloxGenerator::convert(const std::vector& v_dst, const std::vector& v_src) { if (m_jcp.out_data_type.size() == 4) { for (size_t i = 0lu; i < v_src.size(); i++) { auto vmm_src = v_src[i]; @@ -587,7 +595,7 @@ void RandomUniform::convert(const std::vector& v_dst, const std::vecto } template <> -void RandomUniform::tail(const std::vector& vmm_dst) { +void PhiloxGenerator::tail(const std::vector& vmm_dst) { Xbyak::Label l_end; const auto k_rest_mask = getMask(); @@ -623,7 +631,7 @@ void RandomUniform::tail(const std::vector& vmm_dst) { } template <> -void RandomUniform::tail(const std::vector& vmm_dst) { +void PhiloxGenerator::tail(const std::vector& vmm_dst) { Xbyak::Label l_0, l_end; const auto step = vlen / sizeof(uint32_t); @@ -652,7 +660,7 @@ void RandomUniform::tail(const std::vector& vmm_dst) { } template -void RandomUniform::tail(const std::vector& vmm_dst) { +void PhiloxGenerator::tail(const std::vector& vmm_dst) { Xbyak::Label l_0, l_end; const auto step = vlen / sizeof(uint32_t); @@ -677,10 +685,735 @@ void RandomUniform::tail(const std::vector& vmm_dst) { L(l_end); } -template class RandomUniform; -template class RandomUniform; -template class RandomUniform; 
+//////////////// MERSENNE TWISTER GENERATOR //////////////////// + +template +MersenneTwisterGenerator::MersenneTwisterGenerator(const MersenneTwisterGeneratorCompileParams& jcp) + : JitKernel(jit_name(), jcp, isa) {} + +template +void MersenneTwisterGenerator::generate() { + this->preamble(); + registersPool = RegistersPool::create(isa, {rax, rcx, rsp, rdi, k0}); + + r64_dst = getReg64(); + r64_state = getReg64(); + r64_output_idx = getReg64(); + r64_max_output_idx = getReg64(); + r64_storage_capacity = getReg64(); + r64_elements_to_generate = getReg64(); + r64_state_accesses_count = getReg64(); + + mov(r64_dst, ptr[r64_params + GET_MERSENNE_OFFSET(dst_ptr)]); + mov(r64_state, ptr[r64_params + GET_MERSENNE_OFFSET(state_ptr)]); + mov(r64_output_idx, ptr[r64_params + GET_MERSENNE_OFFSET(output_idx)]); + mov(r64_max_output_idx, ptr[r64_params + GET_MERSENNE_OFFSET(max_output_idx)]); + mov(r64_elements_to_generate, ptr[r64_params + GET_MERSENNE_OFFSET(elements_to_generate)]); + mov(r64_state_accesses_count, ptr[r64_params + GET_MERSENNE_OFFSET(state_accesses_count)]); + mov(r64_storage_capacity, static_cast(vlen / sizeof(uint32_t))); + + initVectors(); + process(); + + registersPool.reset(); + this->postamble(); +} + +template <> +void MersenneTwisterGenerator::initVectors() { + const auto r64_aux = getReg64(); + const auto r32_aux = Xbyak::Reg32(r64_aux.getIdx()); + + v_min = getVmm(); + v_range = getVmm(); + v_state = getVmm(); + v_result = getVmm(); + v_const_1 = getVmm(); + v_const_2 = getVmm(); + + if (m_jcp.out_data_type.is_real()) { + v_mask = getVmm(); + v_divisor = getVmm(); + } + + // Initialize state + uni_vmovdqu(v_state, ptr[r64_state]); + + // Initialize constants + BROADCAST_CONSTANT(vpbroadcastd, v_const_1, r32_aux, MT_CONST_1) + BROADCAST_CONSTANT(vpbroadcastd, v_const_2, r32_aux, MT_CONST_2) + + // Initialize constants based on the requested data type + if (m_jcp.out_data_type == element::f32) { + FloatAsBits val; + val.f = 1.0f / (1 << 24); + 
BROADCAST_CONSTANT(vpbroadcastd, v_divisor, r32_aux, val.u); + BROADCAST_CONSTANT(vpbroadcastd, v_mask, r32_aux, static_cast((1 << 24) - 1)) + + BROADCAST_PARAM(vpbroadcastd, v_range, r64_aux, GET_MERSENNE_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastd, v_min, r64_aux, GET_MERSENNE_OFFSET(min_ptr)) + } else if (m_jcp.out_data_type == element::f16 && x64::mayiuse(x64::avx512_core_fp16)) { + FloatAsBits val; + val.f = 1.0f / (1 << 11); + BROADCAST_CONSTANT(vpbroadcastd, v_divisor, r32_aux, val.u); + BROADCAST_CONSTANT(vpbroadcastd, v_mask, r32_aux, static_cast((1 << 11) - 1)) + + // Note: two times too many values in Zmm + BROADCAST_PARAM(vpbroadcastw, v_range, r64_aux, GET_MERSENNE_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastw, v_min, r64_aux, GET_MERSENNE_OFFSET(min_ptr)) + } else if (m_jcp.out_data_type == element::bf16 && x64::mayiuse(x64::avx512_core_bf16)) { + FloatAsBits val; + val.f = 1.0f / (1 << 8); + BROADCAST_CONSTANT(vpbroadcastd, v_divisor, r32_aux, val.u); + BROADCAST_CONSTANT(vpbroadcastd, v_mask, r32_aux, static_cast((1 << 8) - 1)) + + // Note: two times too many values in Zmm + BROADCAST_PARAM(vpbroadcastw, v_range, r64_aux, GET_MERSENNE_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastw, v_min, r64_aux, GET_MERSENNE_OFFSET(min_ptr)) + } else if (m_jcp.out_data_type == element::i32) { + BROADCAST_PARAM(vpbroadcastd, v_range, r64_aux, GET_MERSENNE_OFFSET(range_ptr)) + BROADCAST_PARAM(vpbroadcastd, v_min, r64_aux, GET_MERSENNE_OFFSET(min_ptr)) + } else if (m_jcp.out_data_type == element::i64) { + // Same as in Philox - in scope of i64 enabling + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template // Works for AVX2, SSE41 +void MersenneTwisterGenerator::initVectors() { + const auto r64_aux = getReg64(); + + v_min = 
getVmm(); + v_range = getVmm(); + v_state = getVmm(); + v_result = getVmm(); + v_const_1 = getVmm(); + v_const_2 = getVmm(); + + // Initialize state + uni_vmovdqu(v_state, ptr[r64_state]); + + // Initialize constants. + INIT_ARR(const_1, MT_CONST_1, r64_aux, uint32_t); + uni_vmovdqu(v_const_1, ptr[r64_aux]); + + INIT_ARR(const_2, MT_CONST_2, r64_aux, uint32_t); + uni_vmovdqu(v_const_2, ptr[r64_aux]); + + if (m_jcp.out_data_type == element::f32) { + v_mask = getVmm(); + v_divisor = getVmm(); + + INIT_ARR(mask, static_cast((1 << 24) - 1), r64_aux, uint32_t); + uni_vmovups(v_mask, ptr[r64_aux]); + + INIT_ARR(divisor, static_cast(1.0f / (1 << 24)), r64_aux, float); + uni_vmovups(v_divisor, ptr[r64_aux]); + + mov(r64_aux, ptr[r64_params + GET_MERSENNE_OFFSET(range_ptr)]); + uni_vpbroadcastd(v_range, ptr[r64_aux]); + + mov(r64_aux, ptr[r64_params + GET_MERSENNE_OFFSET(min_ptr)]); + uni_vpbroadcastd(v_min, ptr[r64_aux]); + } else if (m_jcp.out_data_type == element::i32) { + mov(r64_aux, ptr[r64_params + GET_MERSENNE_OFFSET(range_ptr)]); + uni_vpbroadcastd(v_range, ptr[r64_aux]); + + mov(r64_aux, ptr[r64_params + GET_MERSENNE_OFFSET(min_ptr)]); + uni_vpbroadcastd(v_min, ptr[r64_aux]); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template +void MersenneTwisterGenerator::process() { + // Generate random numbers + generateRandomNumbers(); + + // Convert to output type, store result + convertToOutputTypeMersenne(); + + // Store results + storeResults(); +} + +template <> +void MersenneTwisterGenerator::generateRandomNumbers() { + // Difference between this implementation and SSE41/AVX2 is + // uni_vmovdqu -> vmovdqu32, uni fails with incorrect EVEX + // Other functions work just fine + const auto v_aux = getVmm(); + + // Load values from memory, copy + vmovdqu32(v_result, v_state); // x = state + + // Apply Mersenne Twister transformations + vmovdqu32(v_aux, v_result); // tmp 
= x + + // x ^= (x >> 11); + vpsrld(v_aux, v_aux, 11); // tmp >>= 11 + vpxord(v_result, v_result, v_aux); // x ^= tmp + + // x ^= (x << 7) & const_1; + vmovdqu32(v_aux, v_result); // tmp = x + vpslld(v_aux, v_aux, 7); // tmp <<= 7 + vpandd(v_aux, v_aux, v_const_1); // tmp &= const_1 + vpxord(v_result, v_result, v_aux); // x ^= tmp + + // x ^= (x << 15) & const_2; + vmovdqu32(v_aux, v_result); // tmp = x + vpslld(v_aux, v_aux, 15); // tmp <<= 15 + vpandd(v_aux, v_aux, v_const_2); // tmp &= const_2 + vpxord(v_result, v_result, v_aux); // x ^= tmp + + // x ^= (x >> 18); + vmovdqu32(v_aux, v_result); // tmp = x + vpsrld(v_aux, v_aux, 18); // tmp >>= 18 + vpxord(v_result, v_result, v_aux); // x ^= tmp +} + +template // Works for SSE41, AVX2 +void MersenneTwisterGenerator::generateRandomNumbers() { + const auto v_aux = getVmm(); + + // Load values from memory, copy + uni_vmovdqu(v_result, v_state); // x = state + + // Apply Mersenne Twister transformations + uni_vmovdqu(v_aux, v_result); // tmp = x + + // x ^= (x >> 11); + uni_vpsrld(v_aux, v_aux, 11); // tmp >>= 11 + uni_vpxor(v_result, v_result, v_aux); // x ^= tmp + + // x ^= (x << 7) & const_1; + uni_vmovdqu(v_aux, v_result); // tmp = x + uni_vpslld(v_aux, v_aux, 7); // tmp <<= 7 + uni_vpand(v_aux, v_aux, v_const_1); // tmp &= const_1 + uni_vpxor(v_result, v_result, v_aux); // x ^= tmp + + // x ^= (x << 15) & const_2; + uni_vmovdqu(v_aux, v_result); // tmp = x + uni_vpslld(v_aux, v_aux, 15); // tmp <<= 15 + uni_vpand(v_aux, v_aux, v_const_2); // tmp &= const_2 + uni_vpxor(v_result, v_result, v_aux); // x ^= tmp + + // x ^= (x >> 18); + uni_vmovdqu(v_aux, v_result); // tmp = x + uni_vpsrld(v_aux, v_aux, 18); // tmp >>= 18 + uni_vpxor(v_result, v_result, v_aux); // x ^= tmp +} + +template <> +void MersenneTwisterGenerator::convertToOutputTypeMersenne() { + if (m_jcp.out_data_type == element::f32) { + // Apply mask and divisor + // No need to do int32's voodoo with double since mask ensures + // that most significant 
bit is 0 + vpandd(v_result, v_result, v_mask); + vcvtdq2ps(v_result, v_result); + vmulps(v_result, v_result, v_divisor); + + // Scale and shift + vmulps(v_result, v_result, v_range); + vaddps(v_result, v_result, v_min); + } else if (m_jcp.out_data_type == element::f16) { + // Apply mask and divisor + vpandd(v_result, v_result, v_mask); + vcvtdq2ps(v_result, v_result); + vmulps(v_result, v_result, v_divisor); + + vcvtps2ph(v_result, v_result, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); + + // View only half as v_result shrunk from 32->16 conversion + auto ymm_result = Xbyak::Ymm(v_result); + auto ymm_range = Xbyak::Ymm(v_range); + auto ymm_min = Xbyak::Ymm(v_min); + + // Scale and shift + vmulph(ymm_result, ymm_result, ymm_range); + vaddph(v_result, ymm_result, ymm_min); + } else if (m_jcp.out_data_type == element::bf16) { + // Apply mask and divisor + vpandd(v_result, v_result, v_mask); + vcvtdq2ps(v_result, v_result); + vmulps(v_result, v_result, v_divisor); + + // Pseudoconvert to f32 by bitshift + vpslld(v_range, v_range, 16); + vpslld(v_min, v_min, 16); + + // Scale and shift + vmulps(v_result, v_range); + vaddps(v_result, v_min); + + vcvtneps2bf16(v_result, v_result); + } else if (m_jcp.out_data_type == element::i32) { + // Split result before converting 32 -> 64 to fit new bits + const auto v_result_high_double = getVmm(); + const auto v_result_low_double = getVmm(); + const auto v_range_double = getVmm(); + + const auto y_result_high_double = Xbyak::Ymm(v_result_high_double.getIdx()); + const auto y_result_low_double = Xbyak::Ymm(v_result_low_double.getIdx()); + const auto y_range_double = Xbyak::Ymm(v_range_double.getIdx()); + + vextracti32x8(y_result_high_double, v_result, 1); + vextracti32x8(y_result_low_double, v_result, 0); + vextracti32x8(y_range_double, v_range, 0); + + // Extract the most significant bit (MSB) using bitshift + const auto v_msb_high_double = getVmm(); + const auto v_msb_low_double = getVmm(); + const auto y_msb_high_double = 
Xbyak::Ymm(v_msb_high_double.getIdx()); + const auto y_msb_low_double = Xbyak::Ymm(v_msb_low_double.getIdx()); + + vpsrld(y_msb_high_double, y_result_high_double, 31); + vpsrld(y_msb_low_double, y_result_low_double, 31); + + // Remove most significant digit from result by bitshift + // One left (removes msb) + vpslld(y_result_high_double, y_result_high_double, 1); + vpslld(y_result_low_double, y_result_low_double, 1); + + // One right (shifts back, sets 0 at the front) + vpsrld(y_result_high_double, y_result_high_double, 1); + vpsrld(y_result_low_double, y_result_low_double, 1); + + // Create a double value of 2^31 for the most significant digit instead of -1 + const auto r64_multiplier_double = getReg64(); + const auto v_multiplier_double = getVmm(); + + mov(r64_multiplier_double, 0x41E0000000000000); // 2^31 in IEEE 754 double format + vpbroadcastq(v_multiplier_double, r64_multiplier_double); + + // Convert most significant digit to double (either 0 or 1) + vcvtdq2pd(v_msb_high_double, y_msb_high_double); + vcvtdq2pd(v_msb_low_double, y_msb_low_double); + + // Multiply (0/1) * 2^31 + vmulpd(v_msb_high_double, v_msb_high_double, v_multiplier_double); + vmulpd(v_msb_low_double, v_msb_low_double, v_multiplier_double); + + // Convert uint32_t to double for accuracy + vcvtdq2pd(v_result_high_double, y_result_high_double); + vcvtdq2pd(v_result_low_double, y_result_low_double); + vcvtdq2pd(v_range_double, y_range_double); + + // Add sign as 2^31 if was present, correctly converting uint32_t to double + vaddpd(v_result_high_double, v_result_high_double, v_msb_high_double); + vaddpd(v_result_low_double, v_result_low_double, v_msb_low_double); + + // Compute approximate division + const auto v_aprox_result_high_double = getVmm(); + const auto v_aprox_result_low_double = getVmm(); + vdivpd(v_aprox_result_high_double, v_result_high_double, v_range_double); // value / range = (aux = aux / aux2) + vdivpd(v_aprox_result_low_double, v_result_low_double, v_range_double); // value 
/ range = (aux = aux / aux2) + + // Floor the result to nearest int (biggest multiple of divisor) + vrndscalepd(v_aprox_result_high_double, v_aprox_result_high_double, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + vrndscalepd(v_aprox_result_low_double, v_aprox_result_low_double, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + + // Compute closest divisible value by multiplying back + vmulpd(v_aprox_result_high_double, + v_aprox_result_high_double, + v_range_double); // aux = floor(double(result) / double(range)) * double(range) + vmulpd(v_aprox_result_low_double, + v_aprox_result_low_double, + v_range_double); // aux = floor(double(result) / double(range)) * double(range) + + // Compute remainder by subtracting approximation from the original + vsubpd(v_result_high_double, v_result_high_double, v_aprox_result_high_double); + vsubpd(v_result_low_double, v_result_low_double, v_aprox_result_low_double); + + // Convert 64 -> 32, always possible as 0 < result < range + vcvtpd2dq(y_result_high_double, v_result_high_double); // value - closest_div_value = remainder (modulo) + vcvtpd2dq(y_result_low_double, v_result_low_double); // value - closest_div_value = remainder (modulo) + + // Concatenate them back, now result holds all remainders (modulos) + vinserti32x8(v_result, v_result, y_result_high_double, 1); + vinserti32x8(v_result, v_result, y_result_low_double, 0); + + // Add minimum + vpaddd(v_result, v_result, v_min); // remainder + min + } else if (m_jcp.out_data_type == element::i64 && m_jcp.optimized) { + // Same as in Philox - in scope of i64 enabling + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else if (m_jcp.out_data_type == element::i64 && !m_jcp.optimized) { + // Same as in Philox - in scope of i64 enabling + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else { + OPENVINO_THROW("RandomUniform 
kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template <> +void MersenneTwisterGenerator::convertToOutputTypeMersenne() { + if (m_jcp.out_data_type == element::f32) { + // Apply mask and divisor + // No need to do int32's voodoo with double since mask ensures + // that most significant bit is 0 + vpand(v_result, v_result, v_mask); + vcvtdq2ps(v_result, v_result); + vmulps(v_result, v_divisor); + + // Scale and shift + vmulps(v_result, v_range); + vaddps(v_result, v_min); + } else if (m_jcp.out_data_type == element::i32) { + // Split result before converting 32 -> 64 to fit new bits + const auto v_result_high_double = getVmm(); + const auto v_result_low_double = getVmm(); + const auto v_range_double = getVmm(); + + const auto x_result_high_double = Xbyak::Xmm(v_result_high_double.getIdx()); + const auto x_result_low_double = Xbyak::Xmm(v_result_low_double.getIdx()); + const auto x_range_double = Xbyak::Xmm(v_range_double.getIdx()); + + vextracti128(x_result_high_double, v_result, 1); + vextracti128(x_result_low_double, v_result, 0); + vextracti128(x_range_double, v_range, 0); + + // Extract the most significant bit (MSB) using bitshift + const auto v_msb_high_double = getVmm(); + const auto v_msb_low_double = getVmm(); + const auto x_msb_high_double = Xbyak::Xmm(v_msb_high_double.getIdx()); + const auto x_msb_low_double = Xbyak::Xmm(v_msb_low_double.getIdx()); + + vpsrld(x_msb_high_double, x_result_high_double, 31); + vpsrld(x_msb_low_double, x_result_low_double, 31); + + // Remove most significant digit from result by bitshift + // One left (removes msb) + vpslld(x_result_high_double, x_result_high_double, 1); + vpslld(x_result_low_double, x_result_low_double, 1); + + // One right (shifts back, sets 0 at the front) + vpsrld(x_result_high_double, x_result_high_double, 1); + vpsrld(x_result_low_double, x_result_low_double, 1); + + // Create a double value of 2^31 for the most significant digit instead of -1 + 
const auto r64_multiplier_double = getReg64(); + const auto v_multiplier_double = getVmm(); + const auto x_multiplier_double = Xbyak::Xmm(v_multiplier_double.getIdx()); + + mov(r64_multiplier_double, 0x41E0000000000000); // 2^31 in IEEE 754 double format + vmovq(x_multiplier_double, r64_multiplier_double); + vbroadcastsd(v_multiplier_double, x_multiplier_double); + + // Convert most significant digit to double (either 0 or 1) + vcvtdq2pd(v_msb_high_double, x_msb_high_double); + vcvtdq2pd(v_msb_low_double, x_msb_low_double); + + // Multiply (0/1) * 2^31 + vmulpd(v_msb_high_double, v_msb_high_double, v_multiplier_double); + vmulpd(v_msb_low_double, v_msb_low_double, v_multiplier_double); + + // Convert uint32_t to double for accuracy + vcvtdq2pd(v_result_high_double, x_result_high_double); + vcvtdq2pd(v_result_low_double, x_result_low_double); + vcvtdq2pd(v_range_double, x_range_double); + + // Add sign as 2^31 if was present, correctly converting uint32_t to double + vaddpd(v_result_high_double, v_result_high_double, v_msb_high_double); + vaddpd(v_result_low_double, v_result_low_double, v_msb_low_double); + + // Compute approximate division + const auto v_aprox_result_high_double = getVmm(); + const auto v_aprox_result_low_double = getVmm(); + vdivpd(v_aprox_result_high_double, v_result_high_double, v_range_double); // value / range = (aux = aux / aux2) + vdivpd(v_aprox_result_low_double, v_result_low_double, v_range_double); // value / range = (aux = aux / aux2) + + // Floor the result to nearest int (biggest multiple of divisor) + vroundpd(v_aprox_result_high_double, v_aprox_result_high_double, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + vroundpd(v_aprox_result_low_double, v_aprox_result_low_double, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + + // Compute closest divisible value by multiplying back + vmulpd(v_aprox_result_high_double, + v_aprox_result_high_double, + v_range_double); // aux = floor(double(result) / double(range)) * double(range) + 
vmulpd(v_aprox_result_low_double, + v_aprox_result_low_double, + v_range_double); // aux = floor(double(result) / double(range)) * double(range) + + // Compute remainder by subtracting approximation from the original + vsubpd(v_result_high_double, v_result_high_double, v_aprox_result_high_double); + vsubpd(v_result_low_double, v_result_low_double, v_aprox_result_low_double); + + // Convert 64 -> 32, always possible as 0 < result < range + vcvtpd2dq(x_result_high_double, v_result_high_double); // value - closest_div_value = remainder (modulo) + vcvtpd2dq(x_result_low_double, v_result_low_double); // value - closest_div_value = remainder (modulo) + + // Concatenate them back, now result holds all remainders (modulos) + vinserti128(v_result, v_result, x_result_high_double, 1); + vinserti128(v_result, v_result, x_result_low_double, 0); + + // Add minimum + vpaddd(v_result, v_result, v_min); // remainder + min + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template // Works for SSE41 +void MersenneTwisterGenerator::convertToOutputTypeMersenne() { + if (m_jcp.out_data_type == element::f32) { + // Apply mask and divisor + // No need to do int32's voodoo with double since mask ensures + // that most significant bit is 0 + pand(v_result, v_mask); + cvtdq2ps(v_result, v_result); + mulps(v_result, v_divisor); + + // Scale and shift + mulps(v_result, v_range); + addps(v_result, v_min); + } else if (m_jcp.out_data_type == element::i32) { + // Split result before converting 32 -> 64 to fit new bits + const auto r64_result_high_double = getReg64(); + const auto r64_result_low_double = getReg64(); + const auto r64_range_double = getReg64(); + + pextrq(r64_result_high_double, v_result, 1); + pextrq(r64_result_low_double, v_result, 0); + pextrq(r64_range_double, v_range, 0); + + // Extract the most significant bit (MSB) + const auto v_msb_high_double = getVmm(); + const auto 
v_msb_low_double = getVmm(); + const auto r64_aux = getReg64(); + const auto v_result_aux = getVmm(); + + mov(r64_aux, 8000000080000000); // Bitmask to check for leading 1 + + movq(v_result_aux, r64_aux); + movq(v_msb_high_double, r64_result_high_double); + movq(v_msb_low_double, r64_result_low_double); + + and_(v_msb_high_double, v_result_aux); + psrld(v_msb_high_double, 31); + + and_(v_msb_low_double, v_result_aux); + psrld(v_msb_low_double, 31); + + // Convert most significant digit to double (either 0 or 1) + cvtdq2pd(v_msb_high_double, v_msb_high_double); + cvtdq2pd(v_msb_low_double, v_msb_low_double); + + // Remove most significant digit from result + mov(r64_aux, 0x7FFFFFFF7FFFFFFF); + and_(r64_result_high_double, r64_aux); + and_(r64_result_low_double, r64_aux); + + // Create a double value of 2^31 for the most significant digit instead of -1 + mov(r64_aux, 0x41E0000000000000); // 2^31 in IEEE 754 double format + movq(v_result_aux, r64_aux); // v_result_aux reused to store multiplier + pshufd(v_result_aux, v_result_aux, 0x11); + + // Multiply (0/1) * 2^31 + mulpd(v_msb_high_double, v_result_aux); + mulpd(v_msb_low_double, v_result_aux); + + // Convert uint32_t to double for accuracy + const auto v_result_high_double = getVmm(); + const auto v_result_low_double = getVmm(); + const auto v_range_double = getVmm(); + + movq(v_result_high_double, r64_result_high_double); + movq(v_result_low_double, r64_result_low_double); + movq(v_range_double, r64_range_double); + + cvtdq2pd(v_result_high_double, v_result_high_double); + cvtdq2pd(v_result_low_double, v_result_low_double); + cvtdq2pd(v_range_double, v_range_double); + + // Add sign as 2^31 if was present, correctly converting uint32_t to double + addpd(v_result_high_double, v_msb_high_double); + addpd(v_result_low_double, v_msb_low_double); + + // Compute approximate division + const auto v_aprox_result_high_double = getVmm(); + const auto v_aprox_result_low_double = getVmm(); + + 
movups(v_aprox_result_high_double, v_result_high_double); + movups(v_aprox_result_low_double, v_result_low_double); + divpd(v_aprox_result_high_double, v_range_double); // value / range = (aux = aux / aux2) + divpd(v_aprox_result_low_double, v_range_double); // value / range = (aux = aux / aux2) + + // Floor the result to nearest int (biggest multiple of divisor) + roundpd(v_aprox_result_high_double, v_aprox_result_high_double, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + roundpd(v_aprox_result_low_double, v_aprox_result_low_double, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + + // Compute closest divisible value by multiplying back + mulpd(v_aprox_result_high_double, + v_range_double); // aux = floor(double(result) / double(range)) * double(range) + mulpd(v_aprox_result_low_double, + v_range_double); // aux = floor(double(result) / double(range)) * double(range) + + // Compute remainder by subtracting approximation from the original + subpd(v_result_high_double, v_aprox_result_high_double); + subpd(v_result_low_double, v_aprox_result_low_double); + + // Convert 64 -> 32, always possible as 0 < result < range + cvtpd2dq(v_result_high_double, v_result_high_double); // value - closest_div_value = remainder (modulo) + cvtpd2dq(v_result_low_double, v_result_low_double); // value - closest_div_value = remainder (modulo) + + movq(r64_result_high_double, v_result_high_double); + movq(r64_result_low_double, v_result_low_double); + + // Concatenate them back, now result holds all remainders (modulos) + pinsrq(v_result, r64_result_high_double, 1); + pinsrq(v_result, r64_result_low_double, 0); + + // Add minimum + paddd(v_result, v_min); // remainder + min + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template <> +void MersenneTwisterGenerator::storeResults() { + const auto r64_aux = getReg64(); + const auto v_rest_mask = getMask(); + + if (m_jcp.out_data_type.size() == 
sizeof(uint32_t)) { + // Find minimum count from elements_to_generate and storage_capacity + mov(r64_aux, r64_elements_to_generate); + cmp(r64_aux, r64_storage_capacity); + cmovg(r64_aux, r64_storage_capacity); + + fillRestWorkMask(v_rest_mask, r64_aux); + vmovdqu32(ptr[r64_dst] | v_rest_mask, v_result); + } else if (m_jcp.out_data_type.size() == sizeof(uint16_t)) { + mov(r64_aux, r64_elements_to_generate); + cmp(r64_aux, r64_storage_capacity); + cmovg(r64_aux, r64_storage_capacity); + + // Store only the bottom half of the register + auto ymm_result = Xbyak::Ymm(v_result); + fillRestWorkMask(v_rest_mask, r64_aux); + vmovdqu16(ptr[r64_dst] | v_rest_mask, ymm_result); + } else if (m_jcp.out_data_type.size() == sizeof(uint64_t)) { + // i64 enablement + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template <> +void MersenneTwisterGenerator::storeResults() { + const auto r64_aux = getReg64(); + auto v_rest_mask = getMask(); + + if (m_jcp.out_data_type.size() == sizeof(uint32_t)) { + // Find minimum count from elements_to_generate and storage_capacity + mov(r64_aux, r64_elements_to_generate); + cmp(r64_aux, r64_storage_capacity); + cmovg(r64_aux, r64_storage_capacity); + + fillRestWorkMask(v_rest_mask, r64_aux, m_jcp.out_data_type.size()); + vmaskmovps(ptr[r64_dst], v_rest_mask, v_result); + } else if (m_jcp.out_data_type.size() == sizeof(uint16_t)) { + // AVX2 does not support 16 bit value transfer + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else if (m_jcp.out_data_type.size() == sizeof(uint64_t)) { + // i64 enablement + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else { + 
OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template // Works for SSE41 +void MersenneTwisterGenerator::storeResults() { + const auto r64_aux = getReg64(); + + if (m_jcp.out_data_type.size() == sizeof(uint32_t)) { + // Find minimum count from elements_to_generate and storage_capacity + auto v_rest_mask = getMask(); + mov(r64_aux, r64_elements_to_generate); + cmp(r64_aux, r64_storage_capacity); + cmovg(r64_aux, r64_storage_capacity); + store(ptr[r64_dst], v_result, r64_aux, m_jcp.out_data_type.size()); + } else if (m_jcp.out_data_type.size() == sizeof(uint16_t)) { + // SSE41 does not support 16 bit value transfer + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else if (m_jcp.out_data_type.size() == sizeof(uint64_t)) { + // i64 enablement + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", + m_jcp.out_data_type, + " for ", + x64::get_isa_info()); + } +} + +template class PhiloxGenerator; +template class PhiloxGenerator; +template class PhiloxGenerator; + +template class MersenneTwisterGenerator; +template class MersenneTwisterGenerator; +template class MersenneTwisterGenerator; + +#undef INIT_ARR +#undef BROADCAST_PARAM +#undef BROADCAST_CONSTANT +#undef GET_MERSENNE_OFFSET +#undef GET_PHILOX_OFFSET +} // namespace random_uniform } // namespace kernel } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp index 4b447ef7b459ea..81081db86ab926 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp @@ -11,12 +11,18 @@ namespace ov { 
namespace intel_cpu { namespace kernel { +namespace random_uniform { -struct RandomUniformCompileParams { +struct PhiloxGeneratorCompileParams { element::Type out_data_type = element::f32; }; -struct RandomUniformCallArgs { +struct MersenneTwisterGeneratorCompileParams { + element::Type out_data_type = element::f32; + bool optimized = false; +}; + +struct PhiloxGeneratorCallArgs { void* dst_ptr; const void* key_ptr; const void* counter_ptr; @@ -26,12 +32,23 @@ struct RandomUniformCallArgs { uint64_t work_amount = 0lu; }; +struct MersenneTwisterGeneratorCallArgs { + void* dst_ptr; + void* state_ptr; + const void* min_ptr; + const void* range_ptr; + uint64_t output_idx = 0; + uint64_t max_output_idx = 0; + uint64_t state_accesses_count = 0lu; + int64_t elements_to_generate = 0lu; +}; + template -class RandomUniform : public JitKernel { +class PhiloxGenerator : public JitKernel { public: - DECLARE_CPU_JIT_AUX_FUNCTIONS(RandomUniform) + DECLARE_CPU_JIT_AUX_FUNCTIONS(PhiloxGenerator) - explicit RandomUniform(const RandomUniformCompileParams& jcp); + explicit PhiloxGenerator(const PhiloxGeneratorCompileParams& jcp); void generate() override; @@ -102,6 +119,71 @@ class RandomUniform : public JitKernel +class MersenneTwisterGenerator + : public JitKernel { +public: + DECLARE_CPU_JIT_AUX_FUNCTIONS(MersenneTwisterGenerator) + + explicit MersenneTwisterGenerator(const MersenneTwisterGeneratorCompileParams& jcp); + + void generate() override; + +private: + using Vmm = typename dnnl::impl::utils::conditional3::type; + using Vmask = typename dnnl::impl::utils::conditional3::type; + + RegistersPool::Reg r64_dst; + RegistersPool::Reg r64_state; + RegistersPool::Reg r64_state_accesses_count; + RegistersPool::Reg r64_elements_to_generate; + RegistersPool::Reg r64_storage_capacity; + RegistersPool::Reg r64_output_idx; + RegistersPool::Reg r64_max_output_idx; + + const Xbyak::Reg64 r64_params = Xbyak::Reg64(dnnl::impl::cpu::x64::abi_param_regs[0]); + + // Vector registers for input 
storage. + RegistersPool::Reg v_dst; + RegistersPool::Reg v_state; + RegistersPool::Reg v_min; + RegistersPool::Reg v_range; + + // Vector registers for generation. + RegistersPool::Reg v_const_1; // for MT_CONST_1 + RegistersPool::Reg v_const_2; // for MT_CONST_2 + + // Vector registers for conversion. + RegistersPool::Reg v_mask; + RegistersPool::Reg v_divisor; + + // Output vector register + RegistersPool::Reg v_result; + + void initVectors(); + + void process(); + + void generateRandomNumbers(); + + void convertToOutputTypeMersenne(); + + void storeResults(); + + // Mersenne Twister constants + static constexpr uint32_t MT_CONST_1 = 0x9D2C5680; + static constexpr uint32_t MT_CONST_2 = 0xEFC60000; +}; + +} // namespace random_uniform } // namespace kernel } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp index 081e29341fda99..ede73f07b1014b 100644 --- a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp @@ -12,6 +12,18 @@ namespace ov { namespace intel_cpu { namespace node { +// Following const values are taken from the original paper: +// https://www.thesalmons.org/john/random123/papers/random123sc11.pdf +constexpr uint32_t CRUSH_RESISTANCE_CONST_LOWER_VALUE = 0x9E3779B9; +constexpr uint32_t CRUSH_RESISTANCE_CONST_UPPER_VALUE = 0xBB67AE85; +constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_N = 0xD2511F53; +constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER = 0xCD9E8D57; + +// Following const values are taken from the original paper (used by PyTorch): +// https://dl.acm.org/doi/pdf/10.1145/272991.272995 +constexpr int32_t MERSENNE_STATE_N = 624; +constexpr int32_t MERSENNE_STATE_M = 397; + bool RandomUniform::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { @@ -19,10 +31,6 @@ bool RandomUniform::isSupportedOperation(const std::shared_ptr& 
errorMessage = "Only RandomUniform operation from the opset8 is supported by the CPU plugin."; return false; } - if (as_type_ptr(op)->get_alignment() != op::PhiloxAlignment::TENSORFLOW) { - errorMessage = "Only TENSORFLOW alignment mode is supported by the CPU plugin."; - return false; - } } catch (...) { return false; } @@ -46,6 +54,18 @@ RandomUniform::RandomUniform(const std::shared_ptr& op, const GraphCon m_op_seed = rnd_op->get_op_seed(); m_output_prc = op->get_output_element_type(0); + const auto alignment = rnd_op->get_alignment(); + switch (alignment) { + case ov::op::PhiloxAlignment::TENSORFLOW: + m_algo = PHILOX; + break; + case ov::op::PhiloxAlignment::PYTORCH: + m_algo = MERSENNE_TWISTER; + break; + default: + THROW_CPU_NODE_ERR("Alignment of RandomUniform ", alignment, " is not supported by the CPU plugin."); + } + for (size_t i = 0lu; i < op->get_input_size(); i++) { if (is_type(op->get_input_node_ptr(i))) { m_const_inputs[i] = true; @@ -73,14 +93,21 @@ void RandomUniform::initSupportedPrimitiveDescriptors() { } auto out_prc = getOriginalOutputPrecisionAtPort(0); - if (out_prc.is_real() && - ((m_algo == PHILOX && !one_of(out_prc, ov::element::f32, ov::element::f16, ov::element::bf16)) || - (m_algo == STL && !one_of(out_prc, ov::element::f32)))) { - out_prc = ov::element::f32; - } - if (!out_prc.is_real() && !one_of(out_prc, ov::element::i32, ov::element::i64)) { - out_prc = ov::element::i32; + if (out_prc.is_real()) { + if (one_of(m_algo, PHILOX, MERSENNE_TWISTER) && + !one_of(out_prc, ov::element::f32, ov::element::f16, ov::element::bf16)) { + out_prc = ov::element::f32; + } + + if (one_of(m_algo, STL) && !one_of(out_prc, ov::element::f32)) { + out_prc = ov::element::f32; + } + } else { + if (!one_of(out_prc, ov::element::i32, ov::element::i64)) { + out_prc = ov::element::i32; + } } + m_output_prc = out_prc; addSupportedPrimDesc({{LayoutType::ncsp, shape_prc, m_const_inputs[SHAPE]}, @@ -99,29 +126,7 @@ void RandomUniform::createPrimitive() { 
evalRange(); } - if (m_algo == PHILOX) { -#if defined(OPENVINO_ARCH_X86_64) - kernel::RandomUniformCompileParams jcp; - - jcp.out_data_type = m_output_prc; - - m_jit_kernel = kernel::JitKernel::createInstance(jcp); - - if (m_jit_kernel) { - if (auto selected_pd = getSelectedPrimitiveDescriptor()) { - using namespace dnnl::impl::cpu; - if (m_jit_kernel->getIsa() == x64::avx512_core) { - selected_pd->setImplementationType(jit_avx512); - } else if (m_jit_kernel->getIsa() == x64::avx2) { - selected_pd->setImplementationType(jit_avx2); - } else if (m_jit_kernel->getIsa() == x64::sse41) { - selected_pd->setImplementationType(jit_sse42); - } - } - } -#endif // OPENVINO_ARCH_X86_64 - } + prepareGeneratorKernel(); if (m_const_inputs[SHAPE]) { Node::createPrimitive(); @@ -137,51 +142,13 @@ bool RandomUniform::needPrepareParams() const { void RandomUniform::prepareParams() { m_out_shape = getDstMemoryAtPort(0)->getShape().getStaticDims(); - m_out_el_num = std::accumulate(m_out_shape.begin(), m_out_shape.end(), 1lu, std::multiplies()); + m_output_elements_count = std::accumulate(m_out_shape.begin(), m_out_shape.end(), 1lu, std::multiplies()); if (m_algo == PHILOX) { - m_skip_count = m_out_el_num * SKIP_CONST; - - if (m_out_el_num < PHILOX_PARALLEL_EXECUTION_THRESHOLD) { - m_threads_num = 1; - } else { - m_threads_num = parallel_get_max_threads(); - } - m_thread_params.resize(m_threads_num); - - parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { - auto& p = m_thread_params[ithr]; - uint64_t start = 0lu, end = 0lu; - - if (m_jit_kernel) { -#if defined(OPENVINO_ARCH_X86_64) - const auto block_size = (m_jit_kernel->getVectorLen() / m_output_prc.size()) * 2; - const auto blocks_num = (m_out_el_num + block_size - 1) / block_size; - const auto blocks_per_thr = (blocks_num + nthr - 1) / nthr; - - start = ithr * blocks_per_thr * block_size; - end = (ithr + 1) * blocks_per_thr * block_size; -#endif // OPENVINO_ARCH_X86_64 - } else { - const auto groups_num = (m_out_el_num 
+ PHILOX_GROUP_SIZE - 1) / PHILOX_GROUP_SIZE; - const auto groups_per_thr = (groups_num + nthr - 1) / nthr; - - start = ithr * groups_per_thr * PHILOX_GROUP_SIZE; - end = (ithr + 1) * groups_per_thr * PHILOX_GROUP_SIZE; - - p.step = m_output_prc.size() > 4 ? 2 : 4; - } - - if (end > m_out_el_num) { - end = m_out_el_num; - } - if (start > end) { - start = end; - } - p.work_amount = end - start; - p.n_shift = start / PHILOX_GROUP_SIZE; - p.dst_shift = start * m_output_prc.size(); - }); + m_skip_count = m_output_elements_count * SKIP_CONST; + preparePhiloxParams(); + } else if (m_algo == MERSENNE_TWISTER) { + prepareMersenneTwisterParams(); } } @@ -200,11 +167,13 @@ void RandomUniform::execute(dnnl::stream strm) { auto data = getDstDataAtPort(0); if (m_algo == PHILOX) { - m_state = computePhilox(data, m_out_el_num, m_state); + m_state = computePhilox(data, m_output_elements_count, m_state); + } else if (m_algo == MERSENNE_TWISTER) { + computeMersenneTwister(data, m_output_elements_count); } else if (m_algo == STL) { - computeStl(data, m_out_el_num); + computeStl(data, m_output_elements_count); } else { - THROW_CPU_NODE_ERR("unsupported algorithm."); + THROW_CPU_NODE_ERR("does not support the selected algorithm."); } } @@ -212,16 +181,247 @@ void RandomUniform::executeDynamicImpl(dnnl::stream strm) { execute(strm); } +std::string RandomUniform::getPrimitiveDescriptorType() const { + auto selectedPrimitiveDesc = getSelectedPrimitiveDescriptor(); + + impl_desc_type type = impl_desc_type::undef; + if (selectedPrimitiveDesc) { + type = selectedPrimitiveDesc->getImplementationType(); + } + + std::string str_type; + + auto add_type = [&](std::string t) { + if (!str_type.empty() && t.c_str()[0] != '_') + str_type += "_"; + str_type += t; + }; + +#define SEARCH_TYPE(_type) \ + if ((type & impl_desc_type::_type) == impl_desc_type::_type) \ + add_type(#_type) + + SEARCH_TYPE(undef); + SEARCH_TYPE(jit); + SEARCH_TYPE(ref); + + SEARCH_TYPE(avx512); + SEARCH_TYPE(avx2); + 
SEARCH_TYPE(sse42); + SEARCH_TYPE(any); + +#undef SEARCH_TYPE + + if (type == impl_desc_type::unknown) + str_type = "unknown"; + else if (str_type.empty()) + str_type = "undef"; + + if (selectedPrimitiveDesc) { + if (selectedPrimitiveDesc->getConfig().outConfs[0].getMemDesc()->getPrecision() != ov::element::u8) { + str_type += + "_" + std::string( + selectedPrimitiveDesc->getConfig().outConfs[0].getMemDesc()->getPrecision().get_type_name()); + } else { + str_type += "_I8"; + } + } + + return str_type; +} + +bool RandomUniform::needShapeInfer() const { + return !m_const_inputs[SHAPE]; +} + +bool RandomUniform::isExecutable() const { + return !isInputTensorAtPortEmpty(SHAPE); +} + +bool RandomUniform::created() const { + return getType() == Type::RandomUniform; +} + +//////////////////////////////////////////////// + +void RandomUniform::evalRange() { +#define EL_CASE(E) \ + case element::E: \ + m_range_val.E = m_max_val.E - m_min_val.E; \ + break; + + switch (m_output_prc) { + EL_CASE(f64) + EL_CASE(f32) + EL_CASE(f16) + EL_CASE(bf16) + EL_CASE(i64) + EL_CASE(i32) + default: + THROW_CPU_NODE_ERR("has unsupported output precision: ", m_output_prc); + } + +#undef EL_CASE +} + +void RandomUniform::initEdgeValues(OutputType& dst, const void* src, const element::Type& output_type) { +#define EL_CASE(E) \ + case element::E: \ + dst.E = *reinterpret_cast::value_type*>(src); \ + break; + + switch (output_type) { + EL_CASE(f64) + EL_CASE(f32) + EL_CASE(f16) + EL_CASE(bf16) + EL_CASE(i64) + EL_CASE(i32) + default: + THROW_CPU_NODE_ERR("has unsupported output precision: ", output_type); + } + +#undef EL_CASE +} + +void RandomUniform::preparePhiloxParams() { + if (m_output_elements_count < PHILOX_PARALLEL_EXECUTION_THRESHOLD) { + m_threads_num = 1; + } else { + m_threads_num = parallel_get_max_threads(); + } + m_philox_thread_params.resize(m_threads_num); + + parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { + auto& params = m_philox_thread_params[ithr]; + 
uint64_t start = 0lu, end = 0lu; + + if (m_jit_kernel) { +#if defined(OPENVINO_ARCH_X86_64) + const auto block_size = (m_jit_kernel->getVectorLen() / m_output_prc.size()) * 2; + const auto blocks_num = (m_output_elements_count + block_size - 1) / block_size; + const auto blocks_per_thr = (blocks_num + nthr - 1) / nthr; + + start = ithr * blocks_per_thr * block_size; + end = (ithr + 1) * blocks_per_thr * block_size; +#endif // OPENVINO_ARCH_X86_64 + } else { + const auto groups_num = (m_output_elements_count + PHILOX_GROUP_SIZE - 1) / PHILOX_GROUP_SIZE; + const auto groups_per_thr = (groups_num + nthr - 1) / nthr; + + start = ithr * groups_per_thr * PHILOX_GROUP_SIZE; + end = (ithr + 1) * groups_per_thr * PHILOX_GROUP_SIZE; + + params.step = m_output_prc.size() > 4 ? 2 : 4; + } + + if (end > m_output_elements_count) { + end = m_output_elements_count; + } + if (start > end) { + start = end; + } + params.work_amount = end - start; + params.n_shift = start / PHILOX_GROUP_SIZE; + params.dst_shift = start * m_output_prc.size(); + }); +} + +void RandomUniform::prepareMersenneTwisterParams() { + m_threads_num = parallel_get_max_threads(); + + if (m_jit_kernel) { +#if defined(OPENVINO_ARCH_X86_64) + // m_jit_kernel->getVectorLen() either 64, 32 or 16 (bytes) for Zmm, Ymm, Xmm respectively + m_uint_storage_capacity_per_thread = m_jit_kernel->getVectorLen() / sizeof(uint32_t); + const auto maximum_jit_threads = MERSENNE_STATE_N / m_uint_storage_capacity_per_thread; + m_threads_num = std::max(std::min(m_threads_num, maximum_jit_threads), 1); +#endif // OPENVINO_ARCH_X86_64 + } else { + // Each thread processes a pair of uints, generating either 1 or 2 outputs + m_uint_storage_capacity_per_thread = 2; + const auto maximum_threads = MERSENNE_STATE_N / m_uint_storage_capacity_per_thread; + m_threads_num = std::max(std::min(m_threads_num, maximum_threads), 1); + } + + m_mersenne_twister_thread_params.resize(m_threads_num); + m_mersenne_twister_optimization_enabled = + 
!(m_output_prc == element::i64 && (m_max_val.i64 > std::numeric_limits::max() || + m_min_val.i64 > std::numeric_limits::max())); + + const auto thread_offset = static_cast(MERSENNE_STATE_N) / static_cast(m_threads_num) / + static_cast(m_uint_storage_capacity_per_thread); + + const auto byte_offset = m_output_prc.size() / (m_mersenne_twister_optimization_enabled ? 1 : 2); + + parallel_nt(m_threads_num, [&](int ithr, int nthr) { + auto& params = m_mersenne_twister_thread_params[ithr]; + + auto approx_start = thread_offset * static_cast(ithr); + auto approx_end = thread_offset * (static_cast(ithr + 1)); + + auto state_start = static_cast(std::floor(approx_start) * m_uint_storage_capacity_per_thread); + auto state_end = static_cast(std::floor(approx_end) * m_uint_storage_capacity_per_thread); + + // Rounding failsafes + if (ithr == 0) { + state_start = 0; + } else if (ithr + 1 == m_threads_num) { + state_end = MERSENNE_STATE_N; + } + + auto state_accesses = std::ceil(static_cast(state_end - state_start) / + static_cast(m_uint_storage_capacity_per_thread)); + + // Destination index is computed in bytes, therefore the state index + // has to be divided by the byte size of dtype. 
+ // in addition, when optimization is off, 2 values are consumed to create + // one output value, so the state index has to be divided by 2 + auto destination_start = state_start * byte_offset; + + params.src_start_idx = state_start; + params.dst_start_idx = destination_start; + params.state_accesses_count = state_accesses; + }); +} + +void RandomUniform::prepareGeneratorKernel() { +#if defined(OPENVINO_ARCH_X86_64) + if (m_algo == PHILOX) { + kernel::random_uniform::PhiloxGeneratorCompileParams jcp; + jcp.out_data_type = m_output_prc; + + m_jit_kernel = kernel::JitKernel:: + createInstance(jcp); + } else if (m_algo == MERSENNE_TWISTER) { + kernel::random_uniform::MersenneTwisterGeneratorCompileParams jcp; + jcp.out_data_type = m_output_prc; + jcp.optimized = m_mersenne_twister_optimization_enabled; + + m_jit_kernel = kernel::JitKernel:: + createInstance(jcp); + } + + if (m_jit_kernel) { + if (auto selected_pd = getSelectedPrimitiveDescriptor()) { + using namespace dnnl::impl::cpu; + if (m_jit_kernel->getIsa() == x64::avx512_core) { + selected_pd->setImplementationType(jit_avx512); + } else if (m_jit_kernel->getIsa() == x64::avx2) { + selected_pd->setImplementationType(jit_avx2); + } else if (m_jit_kernel->getIsa() == x64::sse41) { + selected_pd->setImplementationType(jit_sse42); + } + } + } +#endif // OPENVINO_ARCH_X86_64 +} + ////////////// PHILOX algo /////////////// namespace { -// Following const values are taken from the original paper: -// https://www.thesalmons.org/john/random123/papers/random123sc11.pdf -constexpr uint32_t CRUSH_RESISTANCE_CONST_LOWER_VALUE = 0x9E3779B9; -constexpr uint32_t CRUSH_RESISTANCE_CONST_UPPER_VALUE = 0xBB67AE85; -constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_N = 0xD2511F53; -constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER = 0xCD9E8D57; -constexpr uint64_t ROUNDS_NUMBER = 10llu; inline void calculateRound(const uint32_t* key, uint32_t* counter, uint32_t* n) { uint64_t prod_0 = STATISTIC_MAXIMIZING_MULTIPLIER_N * 
n[0]; @@ -242,11 +442,26 @@ inline void runPhilox(uint64_t key, uint64_t counter, uint64_t n, uint32_t* res) uint32_t* counter_32 = reinterpret_cast(&counter); uint32_t* n_32 = reinterpret_cast(&n); - for (size_t i = 0lu; i < ROUNDS_NUMBER; i++) { - calculateRound(key_32, counter_32, n_32); - if (i < ROUNDS_NUMBER - 1) - raiseKey(key_32); - } + // Loop unwarping for better performance + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); + raiseKey(key_32); + calculateRound(key_32, counter_32, n_32); res[0] = n_32[0]; res[1] = n_32[1]; @@ -254,7 +469,7 @@ inline void runPhilox(uint64_t key, uint64_t counter, uint64_t n, uint32_t* res) res[3] = counter_32[1]; } -inline void convertToOutputType(const uint32_t* in, float min, float range, float* out, size_t el_to_copy) { +inline void convertToOutputTypePhilox(const uint32_t* in, float min, float range, float* out, size_t el_to_copy) { RandomUniform::OutputType out_val; for (size_t i = 0lu; i < el_to_copy; i++) { @@ -263,7 +478,7 @@ inline void convertToOutputType(const uint32_t* in, float min, float range, floa } } -inline void convertToOutputType(const uint32_t* in, float16 min, float16 range, float16* out, size_t el_to_copy) { +inline void convertToOutputTypePhilox(const uint32_t* in, float16 min, float16 range, float16* out, size_t el_to_copy) { RandomUniform::OutputType out_val; for (size_t i = 0lu; i < el_to_copy; i++) { @@ -273,7 +488,11 @@ inline void convertToOutputType(const uint32_t* in, float16 min, float16 range, 
} } -inline void convertToOutputType(const uint32_t* in, bfloat16 min, bfloat16 range, bfloat16* out, size_t el_to_copy) { +inline void convertToOutputTypePhilox(const uint32_t* in, + bfloat16 min, + bfloat16 range, + bfloat16* out, + size_t el_to_copy) { RandomUniform::OutputType out_val; for (size_t i = 0lu; i < el_to_copy; i++) { @@ -283,13 +502,13 @@ inline void convertToOutputType(const uint32_t* in, bfloat16 min, bfloat16 range } } -inline void convertToOutputType(const uint32_t* in, int32_t min, int32_t range, int32_t* out, size_t el_to_copy) { +inline void convertToOutputTypePhilox(const uint32_t* in, int32_t min, int32_t range, int32_t* out, size_t el_to_copy) { for (size_t i = 0lu; i < el_to_copy; i++) { out[i] = static_cast(in[i] % range + min); } } -inline void convertToOutputType(const uint32_t* in, int64_t min, int64_t range, int64_t* out, size_t el_to_copy) { +inline void convertToOutputTypePhilox(const uint32_t* in, int64_t min, int64_t range, int64_t* out, size_t el_to_copy) { for (size_t i = 0lu; i < el_to_copy; i++) { out[i] = static_cast(((static_cast(in[i * 2]) << 32) + in[i * 2 + 1]) % range + min); } @@ -298,7 +517,7 @@ inline void convertToOutputType(const uint32_t* in, int64_t min, int64_t range, } // namespace std::pair RandomUniform::computePhilox(void* out, - size_t out_el_num, + size_t output_elements_count, const std::pair& prev_state) { // When both seed values are equal to zero RandomUniform should generate non-deterministic sequence. if (m_global_seed == 0lu && m_op_seed == 0lu) { @@ -308,7 +527,6 @@ std::pair RandomUniform::computePhilox(void* out, uint64_t n_state = prev_state.first; uint64_t counter_state = prev_state.second; - uint64_t counter = counter_state > 0 ? 
counter_state : m_op_seed; auto out_u8 = reinterpret_cast(out); @@ -316,43 +534,43 @@ std::pair RandomUniform::computePhilox(void* out, if (m_jit_kernel) { #if defined(OPENVINO_ARCH_X86_64) parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { - auto& p = m_thread_params[ithr]; - if (p.work_amount == 0lu) { + auto& params = m_philox_thread_params[ithr]; + if (params.work_amount == 0lu) { return; } - auto n = n_state + p.n_shift; + auto n = n_state + params.n_shift; - kernel::RandomUniformCallArgs args; + kernel::random_uniform::PhiloxGeneratorCallArgs args; - args.dst_ptr = (out_u8 + p.dst_shift); + args.dst_ptr = (out_u8 + params.dst_shift); args.key_ptr = &m_global_seed; args.counter_ptr = &counter; args.n_ptr = &n; args.min_ptr = &m_min_val; args.range_ptr = &m_range_val; - args.work_amount = p.work_amount; + args.work_amount = params.work_amount; (*m_jit_kernel)(&args); }); #endif // OPENVINO_ARCH_X86_64 } else { auto threadBody = [&](const int ithr, const int nthr) { - auto& p = m_thread_params[ithr]; - if (p.work_amount == 0lu) { + auto& params = m_philox_thread_params[ithr]; + if (params.work_amount == 0lu) { return; } - auto n = n_state + p.n_shift; - auto out_cur = out_u8 + p.dst_shift; - auto work_rest = static_cast(p.work_amount); + auto n = n_state + params.n_shift; + auto out_cur = out_u8 + params.dst_shift; + auto work_rest = static_cast(params.work_amount); uint32_t res[4]; #define EXEC_CASE(P) \ case element::P: { \ auto out_t = reinterpret_cast::value_type*>(out_cur); \ - for (; work_rest > 0l; work_rest -= p.step, out_t += p.step) { \ + for (; work_rest > 0l; work_rest -= params.step, out_t += params.step) { \ runPhilox(m_global_seed, counter, n, res); \ - auto el_to_copy = std::min(p.step, static_cast(work_rest)); \ - convertToOutputType(res, m_min_val.P, m_range_val.P, out_t, el_to_copy); \ + auto el_to_copy = std::min(params.step, static_cast(work_rest)); \ + convertToOutputTypePhilox(res, m_min_val.P, m_range_val.P, out_t, 
el_to_copy); \ if (++n == 0) { \ counter++; \ } \ @@ -384,143 +602,282 @@ std::pair RandomUniform::computePhilox(void* out, return {n_state, counter_state}; } -////////////// STL algo /////////////// -void RandomUniform::computeStl(void* out, size_t work_amount) { - switch (m_output_prc) { - case element::f32: { - generateData>( - std::uniform_real_distribution{m_min_val.f32, m_max_val.f32}, - out, - work_amount); - } break; - case element::i32: { - generateData>( - std::uniform_int_distribution{m_min_val.i32, m_max_val.i32}, - out, - work_amount); - } break; - case element::i64: { - generateData>( - std::uniform_int_distribution{m_min_val.i64, m_max_val.i64}, - out, - work_amount); - } break; - default: - THROW_CPU_NODE_ERR("has unsupported output type: ", m_output_prc); - } +////////////// MERSENNE algo /////////////// + +namespace { + +uint32_t twist(uint32_t u, uint32_t v) { + return (((u & 0x80000000) | (v & 0x7fffffff)) >> 1) ^ (v & 1 ? 0x9908b0df : 0); } -template -void RandomUniform::generateData(DISTR_TYPE distribution, void* out, size_t work_amount) { - auto dst = reinterpret_cast(out); - for (size_t i = 0; i < work_amount; i++) { - *dst = distribution(m_generator); - dst++; +inline void initial_mersenne_state(uint32_t* mersenne_state_ptr, uint64_t global_seed) { + mersenne_state_ptr[0] = global_seed & 0xffffffff; + for (uint32_t j = 1; j < MERSENNE_STATE_N; ++j) { + mersenne_state_ptr[j] = (1812433253 * (mersenne_state_ptr[j - 1] ^ (mersenne_state_ptr[j - 1] >> 30)) + j); } } -////////////////////////////////// -void RandomUniform::initEdgeValues(OutputType& dst, const void* src, const element::Type& output_type) { -#define EL_CASE(E) \ - case element::E: \ - dst.E = *reinterpret_cast::value_type*>(src); \ - break; +inline void next_mersenne_state(uint32_t* mersenne_state_ptr) { + auto* current_state_ptr = mersenne_state_ptr; + for (int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; current_state_ptr++) { + *current_state_ptr = 
current_state_ptr[MERSENNE_STATE_M] ^ twist(current_state_ptr[0], current_state_ptr[1]); + } - switch (output_type) { - EL_CASE(f32) - EL_CASE(f16) - EL_CASE(bf16) - EL_CASE(i32) - EL_CASE(i64) - EL_CASE(f64) - default: - THROW_CPU_NODE_ERR("has unsupported output precision: ", output_type); + for (int j = MERSENNE_STATE_M; --j; current_state_ptr++) { + *current_state_ptr = + current_state_ptr[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(current_state_ptr[0], current_state_ptr[1]); } -#undef EL_CASE + *current_state_ptr = + current_state_ptr[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(current_state_ptr[0], mersenne_state_ptr[0]); } -void RandomUniform::evalRange() { -#define EL_CASE(E) \ - case element::E: \ - m_range_val.E = m_max_val.E - m_min_val.E; \ - break; +void runMersenneTwister(uint32_t& random_nr_1, uint32_t& random_nr_2) { + random_nr_1 ^= (random_nr_1 >> 11); + random_nr_1 ^= (random_nr_1 << 7) & 0x9d2c5680; + random_nr_1 ^= (random_nr_1 << 15) & 0xefc60000; + random_nr_1 ^= (random_nr_1 >> 18); - switch (m_output_prc) { - EL_CASE(f32) - EL_CASE(f16) - EL_CASE(bf16) - EL_CASE(i32) - EL_CASE(i64) - EL_CASE(f64) - default: - THROW_CPU_NODE_ERR("has unsupported output precision: ", m_output_prc); + random_nr_2 ^= (random_nr_2 >> 11); + random_nr_2 ^= (random_nr_2 << 7) & 0x9d2c5680; + random_nr_2 ^= (random_nr_2 << 15) & 0xefc60000; + random_nr_2 ^= (random_nr_2 >> 18); +} + +inline void convertToOutputTypeMersenne(const uint32_t in1, + const uint32_t in2, + float min, + float range, + float* out, + int64_t elements_remaining, + bool optimization_enabled) { + const auto mask = static_cast((uint64_t(1) << std::numeric_limits::digits) - 1); + const auto divisor = static_cast(1) / (uint64_t(1) << std::numeric_limits::digits); + + out[0] = static_cast((in1 & mask) * divisor) * range + min; + if (elements_remaining >= 2l) { + out[1] = static_cast((in2 & mask) * divisor) * range + min; } +} -#undef EL_CASE +inline void convertToOutputTypeMersenne(const 
uint32_t in1, + const uint32_t in2, + float16 min, + float16 range, + float16* out, + int64_t elements_remaining, + bool optimization_enabled) { + const auto mask = static_cast((uint64_t(1) << std::numeric_limits::digits) - 1); + const auto divisor = static_cast(1) / (uint64_t(1) << std::numeric_limits::digits); + + out[0] = static_cast((in1 & mask) * divisor) * range + min; + if (elements_remaining >= 2l) { + out[1] = static_cast((in2 & mask) * divisor) * range + min; + } } -std::string RandomUniform::getPrimitiveDescriptorType() const { - auto selectedPrimitiveDesc = getSelectedPrimitiveDescriptor(); +inline void convertToOutputTypeMersenne(const uint32_t in1, + const uint32_t in2, + bfloat16 min, + bfloat16 range, + bfloat16* out, + int64_t elements_remaining, + bool optimization_enabled) { + const auto mask = static_cast((1UL << 8) - 1); + const auto divisor = static_cast(1) / (1UL << 8); - impl_desc_type type = impl_desc_type::undef; - if (selectedPrimitiveDesc) { - type = selectedPrimitiveDesc->getImplementationType(); + out[0] = static_cast((in1 & mask) * divisor) * range + min; + if (elements_remaining >= 2l) { + out[1] = static_cast((in2 & mask) * divisor) * range + min; } +} - std::string str_type; +inline void convertToOutputTypeMersenne(const uint32_t in1, + const uint32_t in2, + int32_t min, + int32_t range, + int32_t* out, + int64_t elements_remaining, + bool optimization_enabled) { + out[0] = static_cast(in1 % range + min); + if (elements_remaining >= 2l) { + out[1] = static_cast(in2 % range + min); + } +} - auto add_type = [&](std::string t) { - if (!str_type.empty() && t.c_str()[0] != '_') - str_type += "_"; - str_type += t; - }; +inline void convertToOutputTypeMersenne(const uint32_t in1, + const uint32_t in2, + int64_t min, + int64_t range, + int64_t* out, + int64_t elements_remaining, + bool optimization_enabled) { + if (optimization_enabled) { + out[0] = static_cast(in1 % range + min); + if (elements_remaining >= 2l) { + out[1] = 
static_cast(in2 % range + min); + } + } else { + out[0] = static_cast(((static_cast(in1) << 32) + in2) % range + min); + } +} +} // namespace -#define SEARCH_TYPE(_type) \ - if ((type & impl_desc_type::_type) == impl_desc_type::_type) \ - add_type(#_type) +void RandomUniform::computeMersenneTwister(void* out, size_t output_elements_count) { + // When both seed values are equal to zero RandomUniform should generate non-deterministic sequence. + if (m_global_seed == 0lu && m_op_seed == 0lu) { + std::srand(static_cast(std::time(nullptr))); + m_global_seed = std::rand(); + } - SEARCH_TYPE(undef); - SEARCH_TYPE(jit); - SEARCH_TYPE(ref); + const auto elements_consumed_per_one_output = m_mersenne_twister_optimization_enabled ? 1 : 2; + const auto state_regenerations_required = + static_cast(std::ceil(static_cast(output_elements_count) / + static_cast(MERSENNE_STATE_N / elements_consumed_per_one_output))); + const auto byte_offset = MERSENNE_STATE_N * m_output_prc.size(); - SEARCH_TYPE(avx512); - SEARCH_TYPE(avx2); - SEARCH_TYPE(sse42); - SEARCH_TYPE(any); + uint32_t mersenne_state_ptr[MERSENNE_STATE_N]; + auto output_byte_ptr = reinterpret_cast(out); + initial_mersenne_state(mersenne_state_ptr, m_global_seed); -#undef SEARCH_TYPE + if (m_jit_kernel) { +#if defined(OPENVINO_ARCH_X86_64) + for (uint64_t i = 0; i < state_regenerations_required; ++i) { + next_mersenne_state(mersenne_state_ptr); + parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { + kernel::random_uniform::MersenneTwisterGeneratorCallArgs args; + auto& params = m_mersenne_twister_thread_params[ithr]; + args.min_ptr = &m_min_val; + args.range_ptr = &m_range_val; + args.max_output_idx = output_elements_count; + args.state_accesses_count = params.state_accesses_count; + args.state_ptr = mersenne_state_ptr + params.src_start_idx; + args.dst_ptr = output_byte_ptr + params.dst_start_idx + i * byte_offset; + args.output_idx = params.src_start_idx + i * MERSENNE_STATE_N; + args.elements_to_generate = 
static_cast( + std::min(static_cast(m_uint_storage_capacity_per_thread) * args.state_accesses_count, + args.max_output_idx - args.output_idx)); + + if (args.output_idx >= args.max_output_idx) { + return; + } - if (type == impl_desc_type::unknown) - str_type = "unknown"; - else if (str_type.empty()) - str_type = "undef"; + // For loop could not be inside the kernel as I ran out of Reg64s available + for (uint64_t j = 0; j < args.state_accesses_count; ++j) { + (*m_jit_kernel)(&args); + + args.elements_to_generate = + std::max(args.elements_to_generate - static_cast(m_uint_storage_capacity_per_thread), + static_cast(0)); + args.state_ptr = reinterpret_cast(args.state_ptr) + + m_uint_storage_capacity_per_thread * m_output_prc.size(); + args.dst_ptr = reinterpret_cast(args.dst_ptr) + + m_uint_storage_capacity_per_thread * m_output_prc.size(); + args.output_idx += m_uint_storage_capacity_per_thread; + } + }); + } +#endif // OPENVINO_ARCH_X86_64 + } else { + const auto elements_generated_per_access = m_mersenne_twister_optimization_enabled + ? 
m_uint_storage_capacity_per_thread + : m_uint_storage_capacity_per_thread / 2; + + for (uint64_t i = 0; i < state_regenerations_required; ++i) { + next_mersenne_state(mersenne_state_ptr); + parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { + auto& params = m_mersenne_twister_thread_params[ithr]; + auto state_ptr = mersenne_state_ptr + params.src_start_idx; + auto dst_ptr = output_byte_ptr + params.dst_start_idx + i * byte_offset; + auto output_idx = params.src_start_idx + i * MERSENNE_STATE_N; + auto max_output_idx = output_elements_count; + auto state_accesses_count = params.state_accesses_count; + auto elements_to_generate = static_cast( + std::min(static_cast(m_uint_storage_capacity_per_thread) * state_accesses_count, + max_output_idx - output_idx)); + + if (output_idx == max_output_idx) { + return; + } - if (selectedPrimitiveDesc) { - if (selectedPrimitiveDesc->getConfig().outConfs[0].getMemDesc()->getPrecision() != ov::element::u8) { - str_type += - "_" + std::string( - selectedPrimitiveDesc->getConfig().outConfs[0].getMemDesc()->getPrecision().get_type_name()); - } else { - str_type += "_I8"; +#define EXEC_CASE(P) \ + case element::P: { \ + auto dst_dtype_ptr = reinterpret_cast::value_type*>(dst_ptr); \ + for (uint64_t j = 0; j < state_accesses_count; ++j) { \ + if (output_idx >= max_output_idx) { \ + return; \ + } \ + \ + uint32_t random_nr_1 = state_ptr[0], random_nr_2 = state_ptr[1]; \ + runMersenneTwister(random_nr_1, random_nr_2); \ + convertToOutputTypeMersenne(random_nr_1, \ + random_nr_2, \ + m_min_val.P, \ + m_range_val.P, \ + dst_dtype_ptr, \ + elements_to_generate, \ + m_mersenne_twister_optimization_enabled); \ + \ + elements_to_generate -= elements_generated_per_access; \ + state_ptr += m_uint_storage_capacity_per_thread; \ + dst_dtype_ptr += elements_generated_per_access; \ + output_idx += elements_generated_per_access; \ + } \ + } break; + + switch (m_output_prc) { + EXEC_CASE(f32) + EXEC_CASE(f16) + EXEC_CASE(bf16) + 
EXEC_CASE(i32) + EXEC_CASE(i64) + default: + THROW_CPU_NODE_ERR("Unsupported type of RandomUniform: ", m_output_prc.to_string()); + } + }); } } - - return str_type; +#undef EXEC_CASE } -bool RandomUniform::needShapeInfer() const { - return !m_const_inputs[SHAPE]; -} +////////////// STL algorithm /////////////// -bool RandomUniform::isExecutable() const { - return !isInputTensorAtPortEmpty(SHAPE); +template +void RandomUniform::generateData(DISTR_TYPE distribution, void* out, size_t work_amount) { + auto dst = reinterpret_cast(out); + for (size_t i = 0; i < work_amount; i++) { + *dst = distribution(m_generator); + dst++; + } } -bool RandomUniform::created() const { - return getType() == Type::RandomUniform; +void RandomUniform::computeStl(void* out, size_t work_amount) { + switch (m_output_prc) { + case element::f32: { + generateData>( + std::uniform_real_distribution{m_min_val.f32, m_max_val.f32}, + out, + work_amount); + } break; + case element::i32: { + generateData>( + std::uniform_int_distribution{m_min_val.i32, m_max_val.i32}, + out, + work_amount); + } break; + case element::i64: { + generateData>( + std::uniform_int_distribution{m_min_val.i64, m_max_val.i64}, + out, + work_amount); + } break; + default: + THROW_CPU_NODE_ERR("has unsupported output type: ", m_output_prc); + } } +////////////////////////////////// + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.hpp b/src/plugins/intel_cpu/src/nodes/random_uniform.hpp index b04d6b3635a10b..80e426ec5ea476 100644 --- a/src/plugins/intel_cpu/src/nodes/random_uniform.hpp +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.hpp @@ -17,14 +17,14 @@ namespace node { class RandomUniform : public Node { public: union OutputType { + double f64; float f32; float16 f16; bfloat16 bf16; - double f64; + int64_t i64; int32_t i32; uint32_t u32; uint16_t u16; - int64_t i64; }; RandomUniform(const std::shared_ptr& op, const GraphContext::CPtr& context); 
@@ -59,23 +59,14 @@ class RandomUniform : public Node { bool needShapeInfer() const override; private: - void computeStl(void* out, size_t work_amount); - - std::pair computePhilox(void* out, - size_t work_amount, - const std::pair& prev_state); - - template - void generateData(DISTR_TYPE distribution, void* out, size_t work_amount); - - void initOutShape(VectorDims& dst, const void* src, const element::Type& shape_type, size_t len); + void evalRange(); void initEdgeValues(OutputType& dst, const void* src, const element::Type& output_type); - void evalRange(); + void prepareGeneratorKernel(); - enum { SHAPE = 0, MIN_VAL, MAX_VAL }; - enum AlgoType { STL, PHILOX }; + enum PortIndex { SHAPE = 0, MIN_VAL, MAX_VAL }; + enum AlgorithmType { STL = 0, PHILOX, MERSENNE_TWISTER }; bool m_const_inputs[3] = {false, false, false}; @@ -85,40 +76,85 @@ class RandomUniform : public Node { std::pair m_state{0lu, 0lu}; VectorDims m_out_shape = {}; - uint64_t m_out_el_num = 1lu; + uint64_t m_output_elements_count = 1lu; OutputType m_min_val; OutputType m_max_val; OutputType m_range_val; - AlgoType m_algo = PHILOX; + AlgorithmType m_algo = STL; - std::default_random_engine m_generator; + ///////////////////////////////////////////////////////////////////////////////// + + ///// PARALLELISM ///// + + std::shared_ptr m_jit_kernel; - struct ThreadParams { + struct PhiloxThreadParams { uint64_t work_amount = 0lu; uint64_t dst_shift = 0lu; uint64_t n_shift = 0lu; uint64_t step = 0lu; }; - uint64_t m_threads_num = 0lu; - std::vector m_thread_params; + struct MersenneTwisterThreadParams { + uint64_t src_start_idx = 0lu; + uint64_t dst_start_idx = 0lu; + uint64_t state_accesses_count = 0lu; + }; + + int32_t m_threads_num = 0; + + std::vector m_philox_thread_params; + std::vector m_mersenne_twister_thread_params; + + ///////////////////////////////////////////////////////////////////////////////// + + ///// PHILOX ///// - ///// PHILOX constants ///// + // Output elements number threshold to 
execute on one thread. + static constexpr uint64_t PHILOX_PARALLEL_EXECUTION_THRESHOLD = 1000lu; // Determines how many sequence elements of RNG sequence are skipped between runs. - // Can be any positive value, 256 is chosen for parity with Tensorflow. + // 256 is chosen for parity with Tensorflow. static constexpr uint64_t SKIP_CONST = 256lu; // Philox algorithm returns 4 elements of RNG sequence per each invocation static constexpr uint64_t PHILOX_GROUP_SIZE = 4lu; - // Output elements number threshold to execute on one thread. - static constexpr uint64_t PHILOX_PARALLEL_EXECUTION_THRESHOLD = 1000lu; - + // Used to parallelize state generation uint64_t m_skip_count = 0lu; + + void preparePhiloxParams(); + + std::pair computePhilox(void* out, + size_t work_amount, + const std::pair& prev_state); + ///////////////////////////////////////////////////////////////////////////////// - std::shared_ptr m_jit_kernel; + ///// MERSENNE TWISTER ///// + + // PyTorch reduces the execution time when generating 64-bit numbers when the range is below max value of uint32_t + // To reduce variable use, value of 'true' denotes the case in which for every uint32_t a single random value is + // generated for any dtype. Therefore, value of 'false' occurs only when dtype is int64 AND the range is above + // uint32_t. 
+ bool m_mersenne_twister_optimization_enabled = true; + + int32_t m_uint_storage_capacity_per_thread = 1; + + void prepareMersenneTwisterParams(); + + void computeMersenneTwister(void* out, size_t work_amount); + + ///////////////////////////////////////////////////////////////////////////////// + + ///// STL ///// + + std::default_random_engine m_generator; + + template + void generateData(DISTR_TYPE distribution, void* out, size_t work_amount); + + void computeStl(void* out, size_t work_amount); }; } // namespace node diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.cpp index 1a95c8ae31d242..c71bd87ec2ba7c 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.cpp @@ -16,21 +16,22 @@ std::string RandomUniformLayerTestCPU::getTestCaseName(const testing::TestParamI std::ostringstream result; - result << "IS={" << out_shape.size(); - result << "}_OS=" << out_shape; + result << "IS=[" << out_shape.size(); + result << "]_OS=" << out_shape; result << "_Min=" << std::get<0>(min_max); result << "_Max=" << std::get<1>(min_max); result << "_ShapePrc=" << std::get<2>(obj.param); result << "_OutPrc=" << std::get<3>(obj.param); result << "_GlobalSeed=" << std::get<4>(obj.param); result << "_OperationalSeed=" << std::get<5>(obj.param); - result << "_ConstIn={" << utils::bool2str(std::get<6>(obj.param)) << "," - << utils::bool2str(std::get<7>(obj.param)) << "," - << utils::bool2str(std::get<8>(obj.param)) << "}"; + result << "_Alignment=" << std::get<6>(obj.param); + result << "_ConstIn={" << utils::bool2str(std::get<7>(obj.param)) << "," + << utils::bool2str(std::get<8>(obj.param)) << "," + << utils::bool2str(std::get<9>(obj.param)) << "}"; - result << 
CPUTestsBase::getTestCaseName(std::get<9>(obj.param)); + result << CPUTestsBase::getTestCaseName(std::get<10>(obj.param)); - const auto& config = std::get<10>(obj.param); + const auto& config = std::get<11>(obj.param); if (!config.empty()) { result << "_PluginConf={"; for (const auto& conf_item : config) { @@ -53,11 +54,12 @@ void RandomUniformLayerTestCPU::SetUp() { const auto& output_prc = std::get<3>(params); m_global_seed = std::get<4>(params); m_operational_seed = std::get<5>(params); - const auto& const_in_1 = std::get<6>(params); - const auto& const_in_2 = std::get<7>(params); - const auto& const_in_3 = std::get<8>(params); - const auto& cpu_params = std::get<9>(params); - configuration = std::get<10>(params); + const auto& alignment = std::get<6>(params); + const auto& const_in_1 = std::get<7>(params); + const auto& const_in_2 = std::get<8>(params); + const auto& const_in_3 = std::get<9>(params); + const auto& cpu_params = std::get<10>(params); + configuration = std::get<11>(params); m_min_val = std::get<0>(min_max); m_max_val = std::get<1>(min_max); @@ -118,7 +120,7 @@ void RandomUniformLayerTestCPU::SetUp() { init_input_shapes(in_shapes); - const auto rnd_op = std::make_shared(inputs[0], inputs[1], inputs[2], output_prc, m_global_seed, m_operational_seed); + const auto rnd_op = std::make_shared(inputs[0], inputs[1], inputs[2], output_prc, m_global_seed, m_operational_seed, alignment); const ov::ResultVector results{std::make_shared(rnd_op)}; function = std::make_shared(results, in_params, "RandomUniformLayerTestCPU"); @@ -130,6 +132,14 @@ void RandomUniformLayerTestCPU::SetUp() { if (!ov::with_cpu_x86_avx512_core_fp16()) { convert_precisions.insert({ ov::element::f16, ov::element::f32 }); } + + if (m_global_seed != 0lu || m_operational_seed != 0lu) { + // When seeds are non-zero, generator output should be exactly the same + // but due to some rounding errors, these thresholds are still necessary + // albeit the number of these 'rounding errors' is 
minimal (1 in 1000). + abs_threshold = 1e-6; + rel_threshold = 1e-3; + } } template @@ -195,10 +205,8 @@ void RandomUniformLayerTestCPU::compare(const std::vector& expected, SubgraphBaseTest::compare(expected, actual); return; } - // When both seed values are equal to zero, RandomUniform should generate non-deterministic sequence. // In this case will use Mean and Variance metrics. - #define CASE(X) case X : rndUCompare::value_type>(expected[0], actual[0]); break; switch (expected[0].get_element_type()) { diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.hpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.hpp index ec58a8cc6b5347..f35915bf75c4cc 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/random_uniform.hpp @@ -17,6 +17,7 @@ typedef std::tuple< ov::test::ElementType, // Output precision uint64_t, // Global seed uint64_t, // Operational seed + ov::op::PhiloxAlignment, // Alignment of generator bool, // Is 1st input constant bool, // Is 2nd input constant bool, // Is 3rd input constant diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/random_uniform.cpp index ad5ecffe4a5437..ce6ccb7c1d3e44 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/random_uniform.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/random_uniform.cpp @@ -34,6 +34,11 @@ static const std::vector> min_max = { {-50, 0} }; +static const std::vector alignment = { + ov::op::PhiloxAlignment::TENSORFLOW, + ov::op::PhiloxAlignment::PYTORCH +}; + INSTANTIATE_TEST_SUITE_P(smoke_Param, RandomUniformLayerTestCPU, ::testing::Combine( 
::testing::ValuesIn(output_shapes), @@ -42,6 +47,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Param, RandomUniformLayerTestCPU, ::testing::Values(ElementType::f32, ElementType::i32), ::testing::ValuesIn(global_seed), ::testing::ValuesIn(operational_seed), + ::testing::ValuesIn(alignment), ::testing::Values(false), ::testing::Values(false), ::testing::Values(false), @@ -57,6 +63,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ParamConst, RandomUniformLayerTestCPU, ::testing::Values(ElementType::f32), ::testing::Values(1), ::testing::Values(0), + ::testing::ValuesIn(alignment), ::testing::Values(true, false), ::testing::Values(true, false), ::testing::Values(true, false), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/random_uniform.cpp index 552ea7eb09cb0b..70db663ae0f27c 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/random_uniform.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/random_uniform.cpp @@ -20,7 +20,8 @@ static const std::vector output_prc_nightly = { // Need to validate the Kernel corner cases. 
static const std::vector output_shapes_nightly = { - {1}, {2}, {3}, {2, 2}, {5}, {2, 3}, {7}, {2, 2, 2}, {3, 3}, {2, 5}, {11}, {2, 3, 2}, {13}, {2, 7}, {3, 5}, + {1}, {2}, {24}, {20}, {36}, {624}, {625}, + {2, 2}, {5}, {2, 3}, {7}, {2, 2, 2}, {3, 3}, {2, 5}, {11}, {2, 3, 2}, {13}, {2, 7}, {3, 5}, {4, 4}, {1, 17}, {2, 9}, {19}, {4, 5}, {21}, {11, 2}, {23, 1}, {4, 2, 3}, {5, 5}, {26}, {1, 27}, {14, 2}, {29}, {10, 3}, {31}, {2, 8, 2}, {33}, {17, 2}, {5, 7}, {2, 3, 2, 3}, {37}, {2, 19}, {2, 20}, {41}, {42}, {43}, {22, 2}, {3, 5, 3}, {5, 2, 5}, {1, 3, 1, 17, 1}, {26, 2}, {53}, {54}, {55}, {56}, {57}, {58}, {59}, @@ -35,6 +36,8 @@ INSTANTIATE_TEST_SUITE_P(nightly_Param, RandomUniformLayerTestCPU, ::testing::ValuesIn(output_prc_nightly), ::testing::Values(3), ::testing::Values(1), + ::testing::Values(ov::op::PhiloxAlignment::TENSORFLOW, + ov::op::PhiloxAlignment::PYTORCH), ::testing::Values(true, false), ::testing::Values(true, false), ::testing::Values(true, false), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/random_uniform.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/random_uniform.cpp index c64c8fa14f58a5..2e0ad3c85783f3 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/random_uniform.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/random_uniform.cpp @@ -5,6 +5,8 @@ #include "common_test_utils/test_constants.hpp" #include "single_op_tests/random_uniform.hpp" +#include "openvino/op/util/attr_types.hpp" + using ov::test::RandomUniformLayerTest; namespace { @@ -32,6 +34,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(random_uniform_type_specific_params), ::testing::ValuesIn(global_seeds), ::testing::ValuesIn(op_seeds), + ::testing::Values(ov::op::PhiloxAlignment::TENSORFLOW), ::testing::Values(ov::test::utils::DEVICE_GPU)), RandomUniformLayerTest::getTestCaseName); diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/random_uniform.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/random_uniform.hpp index b3b4dadec393a6..c286be721b74c8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/random_uniform.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/random_uniform.hpp @@ -10,6 +10,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" +#include "openvino/op/util/attr_types.hpp" namespace ov { namespace test { @@ -24,6 +25,7 @@ using RandomUniformParamsTuple = typename std::tuple< RandomUniformTypeSpecificParams, // Parameters which depends on output type int64_t, // Global seed int64_t, // Operation seed + ov::op::PhiloxAlignment, // Alignment of generator ov::test::TargetDevice // Device name >; diff --git a/src/tests/functional/shared_test_classes/src/single_op/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_op/random_uniform.cpp index b49bba9f1ff6d4..81c40592db449f 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/random_uniform.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/random_uniform.cpp @@ -13,8 +13,9 @@ std::string RandomUniformLayerTest::getTestCaseName(const testing::TestParamInfo ov::Shape input_shape; int64_t global_seed; int64_t op_seed; + ov::op::PhiloxAlignment alignment; std::string target_device; - std::tie(input_shape, random_uniform_params, global_seed, op_seed, target_device) = obj.param; + std::tie(input_shape, random_uniform_params, global_seed, op_seed, alignment, target_device) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::vec2str(input_shape) << "_"; @@ -23,6 +24,7 @@ std::string RandomUniformLayerTest::getTestCaseName(const testing::TestParamInfo result << "min_val=" << random_uniform_params.min_value << "_"; result << "max_val=" << 
random_uniform_params.max_value << "_"; result << "modelType=" << random_uniform_params.model_type.to_string() << "_"; + result << "alignment=" << alignment << "_"; result << "trgDev=" << target_device; return result.str(); } @@ -32,7 +34,8 @@ void RandomUniformLayerTest::SetUp() { ov::Shape input_shape; int64_t global_seed; int64_t op_seed; - std::tie(input_shape, random_uniform_params, global_seed, op_seed, targetDevice) = this->GetParam(); + ov::op::PhiloxAlignment alignment; + std::tie(input_shape, random_uniform_params, global_seed, op_seed, alignment, targetDevice) = this->GetParam(); auto model_type = random_uniform_params.model_type; // Use Parameter as input with desired model_type to properly configure execution configuration @@ -70,7 +73,8 @@ void RandomUniformLayerTest::SetUp() { max_value, model_type, global_seed, - op_seed); + op_seed, + alignment); function = std::make_shared(random_uniform->outputs(), ov::ParameterVector{input}, "random_uniform"); } From 706f340f6505b0366a9465e1070d476220a40ea2 Mon Sep 17 00:00:00 2001 From: Piotr Kowalczyk Date: Thu, 19 Dec 2024 14:37:51 +0100 Subject: [PATCH 25/60] [GPU]: Added basic STFT implementation (#27794) ### Details: - Added reference impl of STFT op with unit and functional tests. 
### Tickets: - *CVS-147162* --------- Co-authored-by: Pawel Raasz Co-authored-by: Michal Lukaszewski --- .../single_layer_tests/stft.cpp | 73 +- .../intel_gpu/plugin/primitives_list.hpp | 1 + .../include/intel_gpu/primitives/stft.hpp | 62 + .../src/graph/impls/ocl/register.cpp | 1 + .../src/graph/impls/ocl/register.hpp | 1 + .../intel_gpu/src/graph/impls/ocl/stft.cpp | 97 ++ .../src/graph/impls/registry/registry.hpp | 1 + .../intel_gpu/src/graph/include/stft_inst.h | 45 + src/plugins/intel_gpu/src/graph/stft.cpp | 63 + .../kernel_selector/cl_kernels/stft_ref.cl | 68 + .../src/kernel_selector/common_types.h | 3 +- .../kernels/stft/stft_kernel_base.cpp | 90 ++ .../kernels/stft/stft_kernel_base.h | 34 + .../kernels/stft/stft_kernel_ref.cpp | 36 + .../kernels/stft/stft_kernel_ref.h | 18 + .../kernels/stft/stft_kernel_selector.cpp | 17 + .../kernels/stft/stft_kernel_selector.h | 21 + src/plugins/intel_gpu/src/plugin/ops/stft.cpp | 25 + .../intel_gpu/src/plugin/program_builder.cpp | 4 +- .../single_layer_tests/stft.cpp | 17 + .../tests/unit/test_cases/stft_gpu_test.cpp | 208 +++ .../shared_test_classes/base/utils/ranges.hpp | 2 +- .../shared_test_classes/single_op/stft.hpp | 11 + .../src/single_op/stft.cpp | 77 + .../unit_test_utils/tests_data/stft_data.h | 1299 +++++++++++++++++ 25 files changed, 2202 insertions(+), 72 deletions(-) create mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/stft.hpp create mode 100644 src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp create mode 100644 src/plugins/intel_gpu/src/graph/include/stft_inst.h create mode 100644 src/plugins/intel_gpu/src/graph/stft.cpp create mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl create mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp create mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h create mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp 
create mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h create mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp create mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h create mode 100644 src/plugins/intel_gpu/src/plugin/ops/stft.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp create mode 100644 src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp create mode 100644 src/tests/test_utils/unit_test_utils/tests_data/stft_data.h diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp index 91505d131d6aa5..f3cf3280288846 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp @@ -10,75 +10,10 @@ namespace ov { namespace test { -using ov::test::STFTLayerTest; -const std::vector data_type = {ov::element::f32, ov::element::bf16}; -const std::vector step_size_type = {ov::element::i32, ov::element::i64}; - -const std::vector> input_shapes = { - { // Static shapes - {{}, {{128}}}, // 1st input - {{}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Static shapes - {{}, {{1, 128}}}, // 1st input - {{}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Static shapes - {{}, {{2, 226}}}, // 1st input - {{}, {{16}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims in the first input shape - {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input - {{}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims in the first and second input shape - {{-1}, {{128}}}, // 1st 
input - {{-1}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims in the first and second input shape - {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input - {{-1}, {{8}, {16}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims with range in the first and second input shape - {{{2, 4}, {1, 300}}, {{2, 226}, {3, 128}}}, // 1st input - {{{3, 16}}, {{4}, {16}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - } -}; - -const std::vector frame_size = {16, 24}; -const std::vector step_size = {2, 3, 4}; - -const std::vector transpose_frames = { - false, - true, -}; - -std::vector in_types = {utils::InputLayerType::CONSTANT, utils::InputLayerType::PARAMETER}; - -const auto testCaseStatic = ::testing::Combine(::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(frame_size), - ::testing::ValuesIn(step_size), - ::testing::ValuesIn(transpose_frames), - ::testing::ValuesIn(data_type), - ::testing::ValuesIn(step_size_type), - ::testing::ValuesIn(in_types), - ::testing::Values(ov::test::utils::DEVICE_CPU)); - -INSTANTIATE_TEST_SUITE_P(smoke_STFT_static, STFTLayerTest, testCaseStatic, STFTLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_STFT_static, + STFTLayerTest, + STFTLayerTest::GetTestDataForDevice(ov::test::utils::DEVICE_CPU), + STFTLayerTest::getTestCaseName); } // namespace test } // namespace ov diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index e234bc68de0750..c7524f1880157d 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -273,6 +273,7 @@ REGISTER_FACTORY(v15, ROIAlignRotated); REGISTER_FACTORY(v15, BitwiseRightShift); REGISTER_FACTORY(v15, BitwiseLeftShift); REGISTER_FACTORY(v15, SearchSorted); +REGISTER_FACTORY(v15, STFT); // 
--------------------------- Supported internal ops --------------------------- // REGISTER_FACTORY(internal, NonMaxSuppressionIEInternal); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/stft.hpp new file mode 100644 index 00000000000000..8cb1a4028dfa26 --- /dev/null +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/stft.hpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "primitive.hpp" + +namespace cldnn { + +/// @brief Short time fourier transform (STFT) operation. +/// @details Check the specification for details. +struct STFT : public primitive_base { + CLDNN_DECLARE_PRIMITIVE(STFT) + + STFT() : primitive_base("", {}) {} + + /// @brief Constructs STFT primitive. + /// @param id This primitive id. + /// @param signal signal input. + /// @param window window input. + /// @param frame_size Size of the frame. + /// @param frame_step Step between frames. + /// @param transpose_frames Enable/Disable transpose_frames (check specification for details). + + STFT(const primitive_id& id, + const input_info& signal, + const input_info& window, + const input_info& frame_size, + const input_info& frame_step, + const bool transpose_frames) + : primitive_base(id, {signal, window, frame_size, frame_step}), + transpose_frames(transpose_frames) {} + + /// @brief Enable/Disable transpose_frames (check specification for details).
+ bool transpose_frames = false; + + size_t hash() const override { + size_t seed = primitive::hash(); + seed = hash_combine(seed, transpose_frames); + return seed; + } + + bool operator==(const primitive& rhs) const override { + if (!compare_common_params(rhs)) + return false; + + auto rhs_casted = downcast(rhs); + + return transpose_frames == rhs_casted.transpose_frames; + } + + void save(BinaryOutputBuffer& ob) const override { + primitive_base::save(ob); + ob << transpose_frames; + } + + void load(BinaryInputBuffer& ib) override { + primitive_base::load(ib); + ib >> transpose_frames; + } +}; +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp index 2a38d20ac8c9bc..29c0e3371cad2e 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp @@ -88,6 +88,7 @@ void register_implementations() { REGISTER_OCL(scaled_dot_product_attention); REGISTER_OCL(rope); REGISTER_OCL(search_sorted); + REGISTER_OCL(STFT); } } // namespace ocl diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp index c65a23822a6922..28e8956619b223 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp @@ -162,6 +162,7 @@ REGISTER_OCL(unique_gather); REGISTER_OCL(scaled_dot_product_attention); REGISTER_OCL(rope); REGISTER_OCL(search_sorted); +REGISTER_OCL(STFT); #undef REGISTER_OCL diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp new file mode 100644 index 00000000000000..329b442e731373 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "primitive_base.hpp" +#include "stft/stft_kernel_base.h" 
+#include "stft/stft_kernel_selector.h" +#include "stft_inst.h" + +namespace cldnn { +namespace ocl { + +struct STFT_impl : typed_primitive_impl_ocl { + using parent = typed_primitive_impl_ocl; + using parent::parent; + using kernel_selector_t = kernel_selector::STFT_kernel_selector; + using kernel_params_t = kernel_selector::STFT_params; + + DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::STFT_impl) + + std::unique_ptr clone() const override { + return make_unique(*this); + } + + void load(BinaryInputBuffer& ib) override { + parent::load(ib); + if (is_dynamic()) { + auto& kernel_selector = kernel_selector_t::Instance(); + auto kernel_impl = kernel_selector.GetImplementation(_kernel_data.kernelName); + kernel_impl->GetUpdateDispatchDataFunc(_kernel_data); + } + } + + void update_dispatch_data(const kernel_impl_params& impl_param) override { + // If model loaded from cache, params are not initialized, so we create a new object and reuse it in the future + if (_kernel_data.params == nullptr) { + _kernel_data.params = std::make_shared(get_kernel_params(impl_param, true)); + } + + update_shapes(*_kernel_data.params, impl_param); + (_kernel_data.update_dispatch_data_func)(*_kernel_data.params, _kernel_data); + } + + static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param, bool shape_agnostic = false) { + const auto& primitive = impl_param.typed_desc(); + auto params = get_default_params(impl_param, shape_agnostic); + + // Manually add all inputs except first one, since get_default_params does not handle it. + for (size_t i = 1; i < impl_param.input_layouts.size(); ++i) { + params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(i))); + } + + params.transpose_frames = primitive->transpose_frames; + return params; + } + + // [NOTE]: Has to be added as a separate static function, since it is called via static dispatching in + // typed_primitive_impl_ocl::create().
+ static kernel_impl_params static_canonicalize_shapes(const kernel_impl_params& impl_params) { + auto updated_impl_params = canonicalize_fused_shapes(impl_params); + + for (auto& input_layout : updated_impl_params.input_layouts) { + input_layout.set_partial_shape(extend_shape_to_rank_from_begin(input_layout.get_partial_shape())); + } + + for (auto& output_layout : updated_impl_params.output_layouts) { + output_layout.set_partial_shape(extend_shape_to_rank_from_begin(output_layout.get_partial_shape())); + } + + return updated_impl_params; + } + + kernel_impl_params canonicalize_shapes(const kernel_impl_params& impl_params) const override { + return static_canonicalize_shapes(impl_params); + } +}; + +namespace detail { + +attach_STFT_impl::attach_STFT_impl() { + auto types = {data_types::i32, data_types::i64, data_types::f16, data_types::f32}; + + auto formats = {format::bfyx}; + + implementation_map::add(impl_types::ocl, + shape_types::any, + typed_primitive_impl_ocl::create, + types, + formats); +} + +} // namespace detail +} // namespace ocl +} // namespace cldnn + +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::STFT_impl) +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::STFT) diff --git a/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp b/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp index b7dbbaef6e64f1..f45d0897f01363 100644 --- a/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp @@ -216,3 +216,4 @@ REGISTER_DEFAULT_IMPLS(unique_gather, OCL_S, OCL_D); REGISTER_DEFAULT_IMPLS(scaled_dot_product_attention, OCL_S, OCL_D); REGISTER_DEFAULT_IMPLS(rope, OCL_S, OCL_D); REGISTER_DEFAULT_IMPLS(search_sorted, OCL_S, OCL_D); +REGISTER_DEFAULT_IMPLS(STFT, OCL_S, OCL_D); diff --git a/src/plugins/intel_gpu/src/graph/include/stft_inst.h b/src/plugins/intel_gpu/src/graph/include/stft_inst.h new file mode 100644 index 00000000000000..1c770e93253e93 --- /dev/null +++ 
b/src/plugins/intel_gpu/src/graph/include/stft_inst.h @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include + +#include "primitive_inst.h" + +namespace cldnn { + +template <> +struct typed_program_node : public typed_program_node_base { + using parent = typed_program_node_base; + typed_program_node(const std::shared_ptr prim, program& prog) : parent(prim, prog) {} + +public: + using parent::parent; + + program_node& input(size_t idx = 0) const { + return get_dependency(idx); + } + std::vector get_shape_infer_dependencies() const override { + return {2, 3}; + } +}; + +using STFT_node = typed_program_node; + +template <> +class typed_primitive_inst : public typed_primitive_inst_base { + using parent = typed_primitive_inst_base; + using parent::parent; + +public: + typed_primitive_inst(network& network, STFT_node const& desc); + template + static std::vector calc_output_layouts(STFT_node const& node, kernel_impl_params const& impl_param); + static layout calc_output_layout(STFT_node const& node, kernel_impl_params const& impl_param); + static std::string to_string(STFT_node const& node); +}; + +using STFT_inst = typed_primitive_inst; + +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/stft.cpp b/src/plugins/intel_gpu/src/graph/stft.cpp new file mode 100644 index 00000000000000..33de025249a660 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/stft.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include + +#include + +#include "memory_accessor.hpp" +#include "openvino/core/enum_names.hpp" +#include "primitive_type_base.h" +#include "stft_shape_inference.hpp" + +namespace cldnn { +GPU_DEFINE_PRIMITIVE_TYPE_ID(STFT) + +STFT_inst::typed_primitive_inst(network& network, STFT_node const& node) : parent(network, node) {} + +layout STFT_inst::calc_output_layout(STFT_node const& node, kernel_impl_params 
const& impl_param) { + return calc_output_layouts(node, impl_param)[0]; +} + +template +std::vector STFT_inst::calc_output_layouts(STFT_node const& node, kernel_impl_params const& impl_param) { + auto primitive = impl_param.typed_desc(); + + const auto& signal_layout = impl_param.get_input_layout(0); + const auto& window_layout = impl_param.get_input_layout(1); + const auto& frame_size_layout = impl_param.get_input_layout(2); + const auto& frame_step_layout = impl_param.get_input_layout(3); + + std::vector input_shapes = { + signal_layout.get(), + window_layout.get(), + frame_size_layout.get(), + frame_step_layout.get(), + }; + + const auto ta = MemoryAccessor(&impl_param.memory_deps, impl_param.get_stream()); + + std::vector output_shapes; + ov::op::v15::STFT op; + op.set_transpose_frames(primitive->transpose_frames); + output_shapes = shape_infer(&op, input_shapes, ta); + + return {layout{output_shapes[0], signal_layout.data_type, signal_layout.format}}; +} + +std::string STFT_inst::to_string(STFT_node const& node) { + auto node_info = node.desc_to_json(); + json_composite STFT_info; + STFT_info.add("signal", node.input(0).id()); + STFT_info.add("window", node.input(1).id()); + STFT_info.add("framesize", node.input(2).id()); + STFT_info.add("framestep", node.input(3).id()); + STFT_info.add("transpose_frames", node.get_primitive()->transpose_frames); + node_info->add("STFT info", STFT_info); + std::stringstream primitive_description; + node_info->dump(primitive_description); + return primitive_description.str(); +} + +} // namespace cldnn \ No newline at end of file diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl new file mode 100644 index 00000000000000..2f43e1e25aaaa2 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +// 
alternative: https://github.com/OpenCL/ComplexMath/blob/master/clcomplex.h +typedef float2 cfloat; +#define real(a) ((a).s0) +#define imag(a) ((a).s1) +#define crmult(a, b) ((cfloat)(real(a) * (b), imag(a) * (b))) +#define cadd(a, b) ((cfloat)(real(a) + real(b), imag(a) + imag(b))) +#define csub(a, b) ((cfloat)(real(a) - real(b), imag(a) - imag(b))) +#define expmi(x) ((cfloat)(cos(x), -sin(x))) +#define czero() ((cfloat)(0)) + +// Unoptimized, the most obvious stft impl from the definition. +KERNEL(stft_ref)( + OPTIONAL_SHAPE_INFO_ARG + const __global INPUT0_TYPE* restrict signal, + const __global INPUT1_TYPE* restrict window, + const __global INPUT2_TYPE* restrict frame_size_buff, + const __global INPUT3_TYPE* restrict frame_step_buff, + __global OUTPUT_TYPE* restrict output) +{ + const int freq_id = get_global_id(0); + const int frame_id = get_global_id(1); + const int batch = get_global_id(2); + const int frame_size = (int)frame_size_buff[0]; + const int frame_step = (int)frame_step_buff[0]; + const int window_size = INPUT1_SIZE_X; + + // Handling case where window size is smaller than frame size. + const int start_offset = (frame_size - window_size) / 2; + + const INPUT0_TYPE* restrict signal_for_this_frame = signal + batch*INPUT0_SIZE_X + frame_id*frame_step + start_offset; + + // FT from def for single freq for given frame: + cfloat freq_val = czero(); + + // dft_power = 2*PI*(k/N) from dft def. 
+ const float dft_power = 2.0f * M_PI_F * (float)freq_id / (float)frame_size; + + cfloat err = czero(); + for(int i = 0; i < window_size; ++i) { + const float signal_val = (float)signal_for_this_frame[i]; + const float window_val = (float)window[i]; + const float x_i = signal_val*window_val; + const cfloat e_i = expmi(dft_power*(float)(i+start_offset)); + const cfloat val_i = crmult(e_i, x_i); + + // Kahan sum algo: + const cfloat y = csub(val_i, err); + const cfloat newSum = cadd(freq_val, y); + err = csub(newSum, freq_val); + err = csub(err, y); + freq_val = newSum; + } + +#if TRANSPOSE_FRAMES + const int output_real_idx = OUTPUT_GET_INDEX(batch, freq_id, frame_id, 0); + const int output_imag_idx = OUTPUT_GET_INDEX(batch, freq_id, frame_id, 1); +#else + const int output_real_idx = OUTPUT_GET_INDEX(batch, frame_id, freq_id, 0); + const int output_imag_idx = OUTPUT_GET_INDEX(batch, frame_id, freq_id, 1); +#endif + + output[output_real_idx] = (OUTPUT_TYPE)real(freq_val); + output[output_imag_idx] = (OUTPUT_TYPE)imag(freq_val); +} \ No newline at end of file diff --git a/src/plugins/intel_gpu/src/kernel_selector/common_types.h b/src/plugins/intel_gpu/src/kernel_selector/common_types.h index 06b3e04d40e829..704c1151092c04 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/common_types.h +++ b/src/plugins/intel_gpu/src/kernel_selector/common_types.h @@ -102,7 +102,8 @@ enum class KernelType { SWIGLU, ROPE, DYNAMIC_QUANTIZE, - SEARCH_SORTED + SEARCH_SORTED, + STFT }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp new file mode 100644 index 00000000000000..8eb8ce36c14f2f --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2018-2024 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "stft_kernel_base.h" + +#include + +#include "kernel_selector_utils.h" + +namespace kernel_selector { +JitConstants STFTKernelBase::GetJitConstants(const STFT_params& params) const { + JitConstants jit = MakeBaseParamsJitConstants(params); + + jit.AddConstants({MakeJitConstant("TRANSPOSE_FRAMES", params.transpose_frames)}); + + return jit; +} + +void STFTKernelBase::GetUpdateDispatchDataFunc(KernelData& kd) const { + kd.update_dispatch_data_func = [](const Params& params, KernelData& kd) { + const auto& prim_params = static_cast(params); + auto dispatchData = SetDefault(prim_params); + OPENVINO_ASSERT(kd.kernels.size() == 1, "[GPU] Invalid kernels size for update dispatch data func"); + kd.kernels[0].params.workGroups.global = dispatchData.gws; + kd.kernels[0].params.workGroups.local = dispatchData.lws; + kd.kernels[0].skip_execution = KernelData::SkipKernelExecution(prim_params); + }; +} + +STFTKernelBase::DispatchData STFTKernelBase::SetDefault(const STFT_params& params) { + CommonDispatchData dispatchData; + const auto inLayout = params.inputs.front().GetLayout(); + const auto& output = params.outputs.front(); + const auto outLayout = output.GetLayout(); + + OPENVINO_ASSERT(output.Dimentions() == 4); + OPENVINO_ASSERT(output.X().v == 2); + + std::vector> dimsByGws; + + if (params.transpose_frames) { + dispatchData.gws = {output.Feature().v, output.Y().v, output.Batch().v}; + dimsByGws = {{Tensor::DataChannelName::FEATURE}, + {Tensor::DataChannelName::Y}, + {Tensor::DataChannelName::BATCH}}; + } else { + dispatchData.gws = {output.Y().v, output.Feature().v, output.Batch().v}; + dimsByGws = {{Tensor::DataChannelName::Y}, + {Tensor::DataChannelName::FEATURE}, + {Tensor::DataChannelName::BATCH}}; + } + dispatchData.lws = + GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, inLayout, outLayout, dimsByGws); + + return dispatchData; +} + +KernelsData STFTKernelBase::GetCommonKernelsData(const Params& 
params) const { + assert(params.GetType() == KernelType::STFT); + + const auto& prim_params = static_cast(params); + + auto dispatchData = SetDefault(prim_params); + KernelData k_data = KernelData::Default(params); + + auto cldnn_jit = GetJitConstants(prim_params); + auto entry_point = GetEntryPoint(kernelName, prim_params.layerID, params); + auto jit = CreateJit(kernelName, cldnn_jit, entry_point); + + GetUpdateDispatchDataFunc(k_data); + + auto& kernel = k_data.kernels[0]; + FillCLKernelData(kernel, + dispatchData, + params.engineInfo, + kernelName, + jit, + entry_point, + "", + false, + false, + 4, + GetFusedPrimitiveInputsCount(params), + 1, + prim_params.is_shape_agnostic); + + return {k_data}; +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h new file mode 100644 index 00000000000000..75ad08280e6c74 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "kernel_base_opencl.h" +#include "kernel_selector_params.h" + +namespace kernel_selector { +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// STFT +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +struct STFT_params : public base_params { + STFT_params() : base_params(KernelType::STFT), transpose_frames(false) {} + bool transpose_frames; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// STFTKernelBase +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class STFTKernelBase : public KernelBaseOpenCL 
{ +public: + using KernelBaseOpenCL::KernelBaseOpenCL; + + using DispatchData = CommonDispatchData; + +protected: + JitConstants GetJitConstants(const STFT_params& params) const; + static DispatchData SetDefault(const STFT_params& params); + KernelsData GetCommonKernelsData(const Params& params) const; + void GetUpdateDispatchDataFunc(KernelData& kd) const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp new file mode 100644 index 00000000000000..dfc9a9596fe342 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "stft_kernel_ref.h" + +namespace kernel_selector { +ParamsKey STFTKernelRef::GetSupportedKey() const { + ParamsKey k; + + k.EnableInputDataType(Datatype::INT32); + k.EnableInputDataType(Datatype::INT64); + k.EnableInputDataType(Datatype::F32); + k.EnableInputDataType(Datatype::F16); + + k.EnableOutputDataType(Datatype::F32); + k.EnableOutputDataType(Datatype::F16); + + k.EnableInputLayout(DataLayout::bfyx); + + k.EnableOutputLayout(DataLayout::bfyx); + + k.EnableBatching(); + k.EnableDifferentTypes(); + k.EnableDynamicShapesSupport(); + return k; +} + +KernelsData STFTKernelRef::GetKernelsData(const Params& params) const { + return GetCommonKernelsData(params); +} + +KernelsPriority STFTKernelRef::GetKernelsPriority(const Params& /*params*/) const { + return FORCE_PRIORITY_9; +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h new file mode 100644 index 00000000000000..3651539fd7dadb --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h @@ -0,0 +1,18 @@ +// 
Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "stft_kernel_base.h" + +namespace kernel_selector { +class STFTKernelRef : public STFTKernelBase { +public: + STFTKernelRef() : STFTKernelBase("stft_ref") {} + + KernelsData GetKernelsData(const Params& params) const override; + KernelsPriority GetKernelsPriority(const Params& params) const override; + ParamsKey GetSupportedKey() const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp new file mode 100644 index 00000000000000..02edc108c2e680 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "stft_kernel_selector.h" + +#include "stft_kernel_ref.h" + +namespace kernel_selector { +STFT_kernel_selector::STFT_kernel_selector() { + Attach(); +} + +KernelsData STFT_kernel_selector::GetBestKernels(const Params& params) const { + return GetNaiveBestKernel(params, KernelType::STFT); +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h new file mode 100644 index 00000000000000..7e1f9e714cc203 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "kernel_selector.h" + +namespace kernel_selector { +class STFT_kernel_selector : public kernel_selector_base { +public: + static STFT_kernel_selector& Instance() { + static STFT_kernel_selector instance; + return instance; + } + + STFT_kernel_selector(); + + 
KernelsData GetBestKernels(const Params& params) const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/plugin/ops/stft.cpp b/src/plugins/intel_gpu/src/plugin/ops/stft.cpp new file mode 100644 index 00000000000000..9b082cf717683d --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/ops/stft.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/stft.hpp" + +#include "intel_gpu/plugin/common_utils.hpp" +#include "intel_gpu/plugin/program_builder.hpp" +#include "intel_gpu/primitives/stft.hpp" + +namespace ov { +namespace intel_gpu { + +static void CreateSTFTOp(ProgramBuilder& p, const std::shared_ptr& op) { + validate_inputs_count(op, {4}); + auto inputs = p.GetInputInfo(op); + auto prim = + cldnn::STFT(layer_type_name_ID(op), inputs[0], inputs[1], inputs[2], inputs[3], op->get_transpose_frames()); + p.add_primitive(*op, prim); +} + +REGISTER_FACTORY_IMPL(v15, STFT); + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/program_builder.cpp b/src/plugins/intel_gpu/src/plugin/program_builder.cpp index a9bb813d0ce587..a87c5cbcbd87b4 100644 --- a/src/plugins/intel_gpu/src/plugin/program_builder.cpp +++ b/src/plugins/intel_gpu/src/plugin/program_builder.cpp @@ -10,6 +10,7 @@ #include "openvino/op/lstm_sequence.hpp" #include "openvino/op/loop.hpp" #include "openvino/op/search_sorted.hpp" +#include "openvino/op/stft.hpp" #include "ov_ops/dynamic_quantize.hpp" #include "intel_gpu/plugin/common_utils.hpp" @@ -360,7 +361,8 @@ bool ProgramBuilder::requires_new_shape_infer(const std::shared_ptr& o // HACK: SearchSorted has specific shape requirements. // E.g. static input shapes: sorted:[8], values:[2,3,4] are prefectly fine, // but sorted:[8,1,1,1], values:[2,3,4,1] is not valid. - if (ov::is_type(op)) + // Similar case for STFT. 
+ if (ov::is_type(op) || ov::is_type(op)) return true; if (ov::is_type(op)) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp new file mode 100644 index 00000000000000..755c0514ae436b --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_op_tests/stft.hpp" + +#include "common_test_utils/test_constants.hpp" + +namespace ov { +namespace test { + +INSTANTIATE_TEST_SUITE_P(smoke_STFT_static, + STFTLayerTest, + STFTLayerTest::GetTestDataForDevice(ov::test::utils::DEVICE_GPU), + STFTLayerTest::getTestCaseName); +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp new file mode 100644 index 00000000000000..060ebfd4a071fd --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp @@ -0,0 +1,208 @@ +// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "test_utils.h" + +using namespace cldnn; +using namespace ::tests; + +namespace { + +constexpr float REL_EPS = 2e-3f; +constexpr float ABS_EPS = 1e-5f; + +namespace helpers { +// TODO: Move to common place. + +// Converts float vector to another type vector. +template +std::vector ConverFloatVector(const std::vector& vec) { + std::vector ret; + ret.reserve(vec.size()); + for (const auto& val : vec) { + ret.push_back(T(val)); + } + return ret; +} + +// Allocates tensoer with given shape and data. 
+template +memory::ptr AllocateTensor(ov::PartialShape shape, const std::vector& data) { + const layout lo = {shape, ov::element::from(), cldnn::format::bfyx}; + EXPECT_EQ(lo.get_linear_size(), data.size()); + memory::ptr tensor = get_test_engine().allocate_memory(lo); + set_values(tensor, data); + return tensor; +} + +template +void CompareTypedBuffers(const memory::ptr& output, const memory::ptr& expectedOutput, cldnn::stream& stream) { + mem_lock output_ptr(output, stream); + mem_lock wanted_output_ptr(expectedOutput, stream); + + ASSERT_EQ(output->get_layout(), expectedOutput->get_layout()); + ASSERT_EQ(output_ptr.size(), wanted_output_ptr.size()); + for (size_t i = 0; i < output_ptr.size(); ++i) + ASSERT_TRUE(are_equal(wanted_output_ptr[i], output_ptr[i], REL_EPS, ABS_EPS)) << "at index " << i; +} + +void CompareBuffers(const memory::ptr& output, const memory::ptr& expectedOutput, cldnn::stream& stream) { + ASSERT_EQ(output->get_layout(), expectedOutput->get_layout()); + auto type = output->get_layout().data_type; + + switch (type) { + case data_types::f32: + helpers::CompareTypedBuffers(output, expectedOutput, stream); + break; + + default: + GTEST_FAIL() << "Unsupported data type: " << type; + break; + } +} + +} // namespace helpers + +struct STFTTestParams { + ov::PartialShape signalShape; + ov::PartialShape windowShape; + ov::PartialShape outputShape; + int64_t frameSize; + int64_t frameStep; + bool transposedFrames; + std::vector signalData; + std::vector windowData; + std::vector expectedOutput; + std::string testcaseName; +}; + +class stft_test : public ::testing::TestWithParam { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "signalShape=" << param.signalShape; + result << "_windowShape=" << param.windowShape; + result << "_outputShape=" << param.outputShape; + result << "_frameSize=" << param.frameSize; + result << "_frameStep=" << param.frameStep; + 
result << "_transposedFrames=" << param.transposedFrames; + result << "_" << param.testcaseName; + return result.str(); + } + + struct STFTInferenceParams { + bool transposedFrames; + memory::ptr signal; + memory::ptr window; + memory::ptr frameSize; + memory::ptr frameStep; + memory::ptr expectedOutput; + }; + + template + STFTInferenceParams PrepareInferenceParams(const STFTTestParams& testParam) { + using T = typename ov::element_type_traits::value_type; + STFTInferenceParams ret; + + ret.transposedFrames = testParam.transposedFrames; + + ret.signal = + helpers::AllocateTensor(testParam.signalShape, helpers::ConverFloatVector(testParam.signalData)); + ret.window = + helpers::AllocateTensor(testParam.windowShape, helpers::ConverFloatVector(testParam.windowData)); + ret.expectedOutput = + helpers::AllocateTensor(testParam.outputShape, helpers::ConverFloatVector(testParam.expectedOutput)); + + ret.frameStep = helpers::AllocateTensor({}, {testParam.frameStep}); + ret.frameSize = helpers::AllocateTensor({}, {testParam.frameSize}); + + return ret; + } + + void Execute(const STFTInferenceParams& params) { + // Prepare the network. 
+ auto stream = get_test_stream_ptr(get_test_default_config(engine_)); + + auto scalar_layout = params.frameSize->get_layout(); + scalar_layout.set_partial_shape({}); + + topology topology; + topology.add(input_layout("signal", params.signal->get_layout())); + topology.add(input_layout("window", params.window->get_layout())); + topology.add(input_layout("frameSize", scalar_layout)); + topology.add(input_layout("frameStep", scalar_layout)); + topology.add(STFT("stft", + input_info("signal"), + input_info("window"), + input_info("frameSize"), + input_info("frameStep"), + params.transposedFrames)); + + cldnn::network::ptr network = get_network(engine_, topology, get_test_default_config(engine_), stream, false); + + network->set_input_data("signal", params.signal); + network->set_input_data("window", params.window); + network->set_input_data("frameSize", params.frameSize); + network->set_input_data("frameStep", params.frameStep); + + // Run and check results. + auto outputs = network->execute(); + + auto output = outputs.at("stft").get_memory(); + + helpers::CompareBuffers(output, params.expectedOutput, get_test_stream()); + } + +private: + engine& engine_ = get_test_engine(); +}; + +std::vector generateTestParams() { + std::vector params; +#define TEST_DATA(signalShape, \ + windowShape, \ + outputShape, \ + frameSize, \ + frameStep, \ + transposedFrames, \ + signalData, \ + windowData, \ + expectedOutput, \ + testcaseName) \ + params.push_back(STFTTestParams{signalShape, \ + windowShape, \ + outputShape, \ + frameSize, \ + frameStep, \ + transposedFrames, \ + signalData, \ + windowData, \ + expectedOutput, \ + testcaseName}); + +#include "unit_test_utils/tests_data/stft_data.h" +#undef TEST_DATA + + return params; +} + +} // namespace + +#define STFT_TEST_P(precision) \ + TEST_P(stft_test, ref_comp_##precision) { \ + Execute(PrepareInferenceParams(GetParam())); \ + } + +STFT_TEST_P(f32); + +INSTANTIATE_TEST_SUITE_P(stft_test_suit, + stft_test, + 
testing::ValuesIn(generateTestParams()), + stft_test::getTestCaseName); diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index 19de8ce5eb6a79..362258598a1344 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -240,7 +240,7 @@ static std::map inputRanges = { {ov::op::v1::BatchToSpace::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v15::BitwiseLeftShift::get_type_info_static(), Range({{0, 5}, {0, 4}}, {})}, {ov::op::v15::BitwiseRightShift::get_type_info_static(), Range({{0, 5}, {0, 4}}, {})}, - {ov::op::v15::STFT::get_type_info_static(), Range({{16, 24}, {1, 16}}, {{-100, 100}, {-100, 100}})}, + {ov::op::v15::STFT::get_type_info_static(), Range({{16, 24}, {1, 16}}, {{0, 1, 10000}, {0, 1, 10000}})}, }; class ModelRange { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp index 0e165ace364eae..22de0ef8c75b50 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp @@ -25,6 +25,17 @@ typedef std::tuple, class STFTLayerTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); + using TGenData = + testing::internal::CartesianProductHolder>, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + 
testing::internal::ValueArray>; + + static const TGenData GetTestDataForDevice(const char* deviceName); protected: void SetUp() override; diff --git a/src/tests/functional/shared_test_classes/src/single_op/stft.cpp b/src/tests/functional/shared_test_classes/src/single_op/stft.cpp index 2d25b6f386024b..141728b6d60fb7 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/stft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/stft.cpp @@ -84,5 +84,82 @@ void STFTLayerTest::SetUp() { function = std::make_shared(STFT->outputs(), ov::ParameterVector{in_signal, in_window}); } } + +const STFTLayerTest::TGenData STFTLayerTest::GetTestDataForDevice(const char* deviceName) { + const std::vector data_type = {ov::element::bf16, ov::element::f16}; + const std::vector step_size_type = {ov::element::i32, ov::element::i64}; + + const std::vector> input_shapes = { + { + // Static shapes + {{}, {{128}}}, // 1st input + {{}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Static shapes + {{}, {{1, 128}}}, // 1st input + {{}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Static shapes + {{}, {{2, 226}}}, // 1st input + {{}, {{16}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims in the first input shape + {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input + {{}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims in the first and second input shape + {{-1}, {{128}}}, // 1st input + {{-1}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims in the first and second input shape + {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input + {{-1}, {{8}, {16}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims with range in the first and second input shape + {{{2, 4}, {1, 300}}, {{2, 226}, {3, 128}}}, // 1st 
input + {{{3, 16}}, {{4}, {16}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }}; + + const std::vector frame_size = {16, 24}; + const std::vector step_size = {2, 3, 4}; + + const std::vector transpose_frames = { + false, + true, + }; + + std::vector in_types = {utils::InputLayerType::CONSTANT, utils::InputLayerType::PARAMETER}; + + auto data = ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(frame_size), + ::testing::ValuesIn(step_size), + ::testing::ValuesIn(transpose_frames), + ::testing::ValuesIn(data_type), + ::testing::ValuesIn(step_size_type), + ::testing::ValuesIn(in_types), + ::testing::Values(deviceName)); + + return data; +} } // namespace test } // namespace ov diff --git a/src/tests/test_utils/unit_test_utils/tests_data/stft_data.h b/src/tests/test_utils/unit_test_utils/tests_data/stft_data.h new file mode 100644 index 00000000000000..4a6257743f560b --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/tests_data/stft_data.h @@ -0,0 +1,1299 @@ +#pragma once + +#define LIST(...) \ + { __VA_ARGS__ } + +// TEST_DATA(signalShape, +// windowShape, +// outputShape, +// frameSize, +// frameStep, +// transposedFrames, +// signalData, +// windowData, +// expectedOutput, +// testcaseName) + +// NOTE: expected output were generated using pyTorch.searchsorted implementation. 
+ +TEST_DATA(LIST(48), + LIST(16), + LIST(9, 3, 2), + 16, + 16, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + 2.061596, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + -1.064999, + 0.282346, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + -0.660887, + -0.935516, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + 0.566273, + 2.586032, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.90856, + -2.40086, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + 2.472012, + 0.479764, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + -1.628981, + 0.973055, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.790362, + -0.677371, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 3.969412, + 0.0, + 0.012426, + 0.0), + "test_case_0"); + +TEST_DATA(LIST(1, 48), + LIST(16), + LIST(1, 9, 3, 2), + 16, + 16, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 
0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + 2.061596, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + -1.064999, + 0.282346, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + -0.660887, + -0.935516, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + 0.566273, + 2.586032, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.90856, + -2.40086, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + 2.472012, + 0.479764, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + -1.628981, + 0.973055, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.790362, + -0.677371, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 3.969412, + 0.0, + 0.012426, + 0.0), + "test_case_1"); + +TEST_DATA(LIST(1, 48), + LIST(16), + LIST(1, 9, 2, 2), + 16, + 32, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + 
0.872213, + 0.11946, + 0.658023, + -0.816808, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 0.012426, + 0.0), + "test_case_2"); + +TEST_DATA(LIST(3, 48), + LIST(16), + LIST(3, 9, 2, 2), + 16, + 32, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766, + -0.700153, + 0.785022, + 0.449904, + -0.437027, + 0.846442, + -2.708081, + -0.357738, + 1.859077, + -1.122008, + 0.797016, + -0.205318, + 0.620443, + -1.210487, + -0.100233, + -0.644188, + 1.252426, + -1.503346, + 1.685813, + -0.655548, + 0.148169, + 0.98681, + -1.806409, + -0.789457, + 0.934387, + 0.819341, + -0.359637, + -0.394646, + -0.040578, + 1.10817, + 1.745871, + -0.706232, + 2.154361, + -0.417549, + 0.724758, + -1.090765, + 0.9193, + 0.535271, + -0.979016, + 0.870831, + -0.405604, + -0.192899, + 0.242223, + -2.103053, + -0.234349, + -1.273937, + -0.334684, + -1.239732, + 1.185672, + 1.292743, + 0.741054, + -0.700485, + -0.252933, + -0.760226, + 0.68806, + 0.761746, + 0.065581, + -0.189028, + 0.253604, + 0.17645, + 0.993091, + 0.771911, + -0.45738, + -0.123291, + -0.150833, + -1.207279, + -1.033516, + -0.975503, + 0.626698, + 0.50241, + -1.377113, + -0.788385, + 0.043618, + -0.945737, + 0.093409, + -0.43187, + 0.748239, + 1.084854, + 1.147015, + 0.171417, + -0.231462, + 
-1.082049, + 0.213656, + -0.888956, + 0.489294, + -0.722142, + 0.920915, + -1.327819, + -1.384117, + -1.432893, + -0.535934, + 0.48227, + 0.220006, + -0.589304, + -1.305996, + -1.089244, + 1.762965), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 0.012426, + 0.0, + -0.789636, + 0.0, + -2.698145, + 0.0, + -0.35829, + 0.7881, + 1.297723, + -2.351696, + 2.059313, + -1.108987, + 0.58228, + 1.450589, + -1.325991, + -0.840667, + -1.163221, + -0.362671, + -0.79498, + 3.236621, + 0.830829, + 0.296243, + 2.930576, + -2.854989, + 0.544256, + 1.448767, + -3.939284, + -0.528339, + -0.229411, + -3.582516, + 3.241738, + 2.887635, + 0.092836, + 3.078159, + -2.836527, + 0.0, + -1.212438, + 0.0, + 1.769482, + 0.0, + -4.335095, + 0.0, + -1.254114, + 0.665334, + 3.115947, + 0.51333, + -0.628684, + 0.238478, + -2.409475, + -2.158762, + 2.008016, + -0.98037, + 2.824623, + 2.211092, + -0.863346, + 0.059172, + -1.077194, + -0.687371, + -0.326849, + 1.138742, + -0.569806, + 0.200439, + 0.238943, + -0.929741, + 0.855134, + 0.989918, + 0.329058, + -0.27983, + 0.360809, + -1.762999, + -0.775532, + 0.0, + -1.864981, + 0.0), + "test_case_3"); + +TEST_DATA(LIST(3, 48), + LIST(16), + LIST(3, 2, 9, 2), + 16, + 32, + false, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + 
-0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766, + -0.700153, + 0.785022, + 0.449904, + -0.437027, + 0.846442, + -2.708081, + -0.357738, + 1.859077, + -1.122008, + 0.797016, + -0.205318, + 0.620443, + -1.210487, + -0.100233, + -0.644188, + 1.252426, + -1.503346, + 1.685813, + -0.655548, + 0.148169, + 0.98681, + -1.806409, + -0.789457, + 0.934387, + 0.819341, + -0.359637, + -0.394646, + -0.040578, + 1.10817, + 1.745871, + -0.706232, + 2.154361, + -0.417549, + 0.724758, + -1.090765, + 0.9193, + 0.535271, + -0.979016, + 0.870831, + -0.405604, + -0.192899, + 0.242223, + -2.103053, + -0.234349, + -1.273937, + -0.334684, + -1.239732, + 1.185672, + 1.292743, + 0.741054, + -0.700485, + -0.252933, + -0.760226, + 0.68806, + 0.761746, + 0.065581, + -0.189028, + 0.253604, + 0.17645, + 0.993091, + 0.771911, + -0.45738, + -0.123291, + -0.150833, + -1.207279, + -1.033516, + -0.975503, + 0.626698, + 0.50241, + -1.377113, + -0.788385, + 0.043618, + -0.945737, + 0.093409, + -0.43187, + 0.748239, + 1.084854, + 1.147015, + 0.171417, + -0.231462, + -1.082049, + 0.213656, + -0.888956, + 0.489294, + -0.722142, + 0.920915, + -1.327819, + -1.384117, + -1.432893, + -0.535934, + 0.48227, + 0.220006, + -0.589304, + -1.305996, + -1.089244, + 1.762965), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + -0.832353, + 0.394573, + -1.071078, + -0.247753, + 0.658023, + -0.816808, + -0.030171, + 2.561505, + -0.226757, + -2.46842, + 0.078811, + 0.923822, + 0.915031, + -0.677954, + -1.483484, + 0.0, + -2.69633, + 0.0, + 1.075557, + -1.680968, + 
0.872213, + 0.11946, + -0.348891, + 1.429143, + -0.474217, + -0.146423, + -0.450706, + -0.031404, + 1.696735, + -2.559718, + -1.028739, + 3.200351, + 0.012426, + 0.0, + -0.789636, + 0.0, + -0.35829, + 0.7881, + 2.059313, + -1.108987, + -1.325991, + -0.840667, + -0.79498, + 3.236621, + 2.930576, + -2.854989, + -3.939284, + -0.528339, + 3.241738, + 2.887635, + -2.836527, + 0.0, + -2.698145, + 0.0, + 1.297723, + -2.351696, + 0.58228, + 1.450589, + -1.163221, + -0.362671, + 0.830829, + 0.296243, + 0.544256, + 1.448767, + -0.229411, + -3.582516, + 0.092836, + 3.078159, + -1.212438, + 0.0, + 1.769482, + 0.0, + -1.254114, + 0.665334, + -0.628684, + 0.238478, + 2.008016, + -0.98037, + -0.863346, + 0.059172, + -0.326849, + 1.138742, + 0.238943, + -0.929741, + 0.329058, + -0.27983, + -0.775532, + 0.0, + -4.335095, + 0.0, + 3.115947, + 0.51333, + -2.409475, + -2.158762, + 2.824623, + 2.211092, + -1.077194, + -0.687371, + -0.569806, + 0.200439, + 0.855134, + 0.989918, + 0.360809, + -1.762999, + -1.864981, + 0.0), + "test_case_4"); + +TEST_DATA(LIST(1, 48), + LIST(33), + LIST(1, 2, 17, 2), + 33, + 11, + false, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.009036, + 0.035816, + 0.079373, + 0.138133, + 0.209972, + 0.292293, + 0.382121, + 0.476209, + 0.571157, + 0.663534, + 0.75, + 0.82743, + 0.893027, + 0.944418, + 0.979746, + 0.997736, + 0.997736, + 0.979746, + 0.944418, + 0.893027, + 0.82743, + 0.75, + 0.663534, + 
0.571157, + 0.476209, + 0.382121, + 0.292292, + 0.209971, + 0.138133, + 0.079373, + 0.035816, + 0.009036), + LIST(2.579306, + 0.0, + -0.152245, + -0.738268, + -2.416611, + 1.261868, + 2.867594, + -1.060565, + -3.284244, + 0.170454, + 2.868702, + -0.841069, + -1.104891, + 3.300689, + -1.140553, + -4.425755, + 0.251539, + 2.542669, + 1.792233, + -0.387007, + 1.135473, + 1.006229, + -3.639657, + -1.375989, + 1.341596, + 1.403905, + -0.208277, + -2.707592, + 0.588243, + 3.234172, + 1.73603, + -4.193725, + -1.924587, + 6.196737, + 3.055838, + 0.0, + -2.899791, + 0.050701, + 1.185766, + 0.087218, + 0.653565, + 0.058316, + 0.340882, + -0.196132, + -0.708827, + -1.955343, + -1.790521, + 2.863453, + 2.38143, + -0.021196, + -1.090831, + -2.4223, + 2.221659, + 2.73066, + -4.076646, + -3.259353, + 3.247396, + 2.531169, + -1.041836, + -0.880713, + 0.399545, + 2.26406, + -0.611558, + -1.956384, + 1.462047, + 1.335275, + -1.2002, + -5.702536), + "test_case_5"); + +TEST_DATA(LIST(48), + LIST(7), + LIST(6, 13, 2), + 11, + 3, + true, + LIST(-0.41676, + -0.05627, + -2.1362, + 1.64027, + -1.79344, + -0.84175, + 0.50288, + -1.24529, + -1.05795, + -0.90901, + 0.55145, + 2.29221, + 0.04154, + -1.11793, + 0.53906, + -0.59616, + -0.01913, + 1.175, + -0.74787, + 0.00903, + -0.87811, + -0.15643, + 0.25657, + -0.98878, + -0.33882, + -0.23618, + -0.63766, + -1.18761, + -1.42122, + -0.1535, + -0.26906, + 2.23137, + -2.43477, + 0.11273, + 0.37044, + 1.35963, + 0.50186, + -0.84421, + 0.00001, + 0.54235, + -0.31351, + 0.77101, + -1.86809, + 1.73118, + 1.46768, + -0.33568, + 0.61134, + 0.04797), + LIST(0.0, 0.25, 0.75, 1.0, 0.75, 0.25, 0.0), + LIST(-1.71092, + 0., + -2.41009, + 0., + 2.23022, + 0., + -0.7409, + 0., + 0.45297, + 0., + -1.11149, + 0., + -1.14862, + 0., + -2.14551, + 0., + -1.16026, + 0., + -0.65135, + 0., + 1.83099, + 0., + -0.1793, + 0., + -0.2968, + 0., + 1.47212, + 0.71877, + 2.17268, + 0.79158, + -2.28473, + -0.93586, + 0.4625, + 0.34192, + -0.56009, + -0.32899, + 0.93528, + 
0.44276, + 1.11077, + 0.05564, + 1.82719, + -0.1221, + 0.71587, + 1.50743, + 1.10802, + -0.41842, + -1.71345, + -0.67438, + 0.05781, + 0.40969, + 0.4558, + -0.24137, + -0.54856, + -1.56669, + -1.47087, + -1.22889, + 2.1535, + 1.84441, + 0.18738, + -0.28908, + 0.66134, + 0.88008, + -0.66811, + -0.52077, + -1.02705, + -0.15929, + -1.12869, + 0.2893, + 0.0583, + -1.66476, + -2.16394, + 0.18383, + 1.42389, + 1.02343, + 0.32308, + -0.7337, + -0.68826, + 0.55139, + -0.91886, + 1.85309, + 0.52177, + 0.97814, + -1.50306, + -2.29021, + -0.76526, + -0.28515, + -0.47423, + -1.4385, + 0.63386, + 0.43591, + 0.90989, + 0.38369, + 0.51776, + -0.36462, + -0.31809, + 0.57129, + 2.99689, + 0.98808, + -1.06897, + -0.98176, + -0.81284, + 0.72147, + 0.63521, + -1.1571, + 1.74128, + -1.03922, + 0.14692, + -0.1082, + 0.64531, + 1.98433, + 0.856, + 1.12631, + 0.14133, + 1.66429, + -0.63884, + -0.57479, + -0.6772, + -0.71798, + -0.19529, + 0.22579, + 0.09013, + 0.66192, + -2.7275, + -2.70068, + 0.6808, + 0.74142, + 0.95724, + -0.28153, + -0.33733, + 2.09067, + -0.89051, + -0.04374, + -0.16546, + -0.69762, + -0.12612, + -1.43585, + -0.37017, + -1.74231, + 0.00518, + -1.6207, + 0.29356, + 0.84215, + 0.2579, + 0.98549, + 0.05179, + -0.0244, + 0.03393, + -1.30044, + 1.1122, + 3.98255, + -0.23778, + -0.54982, + -0.43563, + -0.19685, + 0.08299, + -2.86001), + "test_case_6"); \ No newline at end of file From 19e7dae3555825b7bbabd9696a825ec98fe8a400 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Thu, 19 Dec 2024 17:40:34 +0400 Subject: [PATCH 26/60] [TF FE] Stabilize tests for unary operation defined on full real domain (#28111) **Details:** Stabilize tests for unary operation defined on full real domain **Ticket:** TBD --------- Signed-off-by: Kazantsev, Roman --- .../tensorflow_tests/test_tf_UnaryOps.py | 51 +++----------- .../test_tf_UnaryOpsAllRealDomain.py | 70 +++++++++++++++++++ 2 files changed, 78 insertions(+), 43 deletions(-) create mode 100644 
tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py index 902adebc2226fc..69f08acd9754d2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py @@ -1,11 +1,10 @@ # Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import platform -import sys - import numpy as np +import platform import pytest +import sys from common.tf_layer_test_class import CommonTFLayerTest @@ -14,7 +13,7 @@ class TestUnaryOps(CommonTFLayerTest): def _prepare_input(self, inputs_dict): non_negative = ['Sqrt', 'Log'] - narrow_borders = ["Sinh", "Cosh", "Tanh", "Exp", "Selu"] + narrow_borders = ["Tanh"] within_one = ['Asin', 'Acos', 'Atanh'] from_one = ['Acosh'] @@ -76,25 +75,14 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_legacy_fronte 'Asin': tf.math.asin, 'Asinh': tf.math.asinh, 'Atan': tf.math.atan, - 'Atanh': tf.math.atanh, 'BitwiseNot': tf.bitwise.invert, 'Ceiling': tf.math.ceil, - 'Cos': tf.math.cos, - 'Cosh': tf.math.cosh, - 'Elu': tf.nn.elu, - 'Erf': tf.math.erf, - 'Exp': tf.math.exp, 'Floor': tf.math.floor, 'Log': tf.math.log, 'LogicalNot': tf.math.logical_not, # 'Mish': tfa.activations.mish, # temporarily moved to `create_net_with_mish()` 'Negative': tf.math.negative, - 'Selu': tf.nn.selu, - 'Sigmoid': tf.nn.sigmoid, 'Sign': tf.math.sign, - 'Sin': tf.math.sin, - 'Sinh': tf.math.sinh, - 'SoftPlus': tf.nn.softplus, 'Square': tf.math.square, 'Tan': tf.math.tan, 'Tanh': tf.math.tanh, @@ -126,15 +114,8 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_legacy_fronte test_data_precommit = [dict(shape=[4, 6, 8, 10, 12])] @pytest.mark.parametrize("params", test_data_precommit) - @pytest.mark.parametrize("op_type", ['Elu', - 'Sigmoid', - 'Sin', - 'Sinh', - 'Cos', - 'Cosh', - 'Abs', + 
@pytest.mark.parametrize("op_type", ['Abs', 'Negative', - 'Exp', 'Tan', 'Tanh', 'Floor', @@ -145,15 +126,11 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_legacy_fronte 'Atan', 'Log', 'Sign', - 'SoftPlus', - 'Atanh', 'Acosh', 'Asinh', 'LogicalNot', 'Square', - 'Erf', - 'BitwiseNot' - ]) + 'BitwiseNot']) @pytest.mark.nightly def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_legacy_frontend): @@ -188,15 +165,8 @@ def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, dict(shape=[4, 6, 8, 10, 12])] @pytest.mark.parametrize("params", test_data) - @pytest.mark.parametrize("op_type", ['Elu', - 'Sigmoid', - 'Sin', - 'Sinh', - 'Cos', - 'Cosh', - 'Abs', + @pytest.mark.parametrize("op_type", ['Abs', 'Negative', - 'Exp', 'Tan', 'Tanh', 'Floor', @@ -206,17 +176,12 @@ def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, 'Acos', 'Atan', 'Log', - 'LogicalNot', 'Sign', - 'SoftPlus', - 'Atanh', 'Acosh', 'Asinh', + 'LogicalNot', 'Square', - 'Erf', - 'Selu', - 'BitwiseNot' - ]) + 'BitwiseNot']) @pytest.mark.nightly @pytest.mark.skipif(sys.platform == 'darwin', reason="Ticket - 122182") @pytest.mark.xfail(platform.machine() in ["aarch64", "arm64", "ARM64"], reason='Ticket - 122716') diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py new file mode 100644 index 00000000000000..4ff4d589cbae32 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py @@ -0,0 +1,70 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import platform +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + +rng = np.random.default_rng(253512) + + +class TestUnaryOpsAllRealDomain(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x:0' in 
inputs_info, "Test error: inputs_info must contain `x`" + x_shape = inputs_info['x:0'] + inputs_data = {} + inputs_data['x:0'] = rng.uniform(-5.0, 5.0, x_shape).astype(self.input_type) + return inputs_data + + def create_unary_net(self, input_shape, input_type, op_type): + op_type_map = { + 'Elu': lambda x: tf.raw_ops.Elu(features=x), + 'Sigmoid': tf.raw_ops.Sigmoid, + 'Sin': tf.raw_ops.Sin, + 'Sinh': tf.raw_ops.Sinh, + 'Cos': tf.raw_ops.Cos, + 'Cosh': tf.raw_ops.Cosh, + 'Exp': tf.raw_ops.Exp, + 'Atan': tf.raw_ops.Atan, + 'Softplus': lambda x: tf.raw_ops.Softplus(features=x), + 'Erf': tf.raw_ops.Erf, + 'Selu': lambda x: tf.raw_ops.Selu(features=x) + } + + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + op_type_map[op_type](x=x) + tf.compat.v1.global_variables_initializer() + + tf_net = sess.graph_def + + return tf_net, None + + @pytest.mark.parametrize("input_shape", [[], [2], [3, 4], [3, 2, 4]]) + @pytest.mark.parametrize("input_type", [np.float16, np.float32, np.float64]) + @pytest.mark.parametrize("op_type", ['Elu', + 'Sigmoid', + 'Sin', + 'Sinh', + 'Cos', + 'Cosh', + 'Exp', + 'Atan', + 'Softplus', + 'Erf', + 'Selu']) + @pytest.mark.precommit + @pytest.mark.nightly + def test_unary_ops(self, input_shape, input_type, op_type, + ie_device, precision, ir_version, temp_dir, + use_legacy_frontend): + if platform.machine() in ["aarch64", "arm64", "ARM64"] and op_type in ['Cos', 'Cosh', 'Sinh', 'Exp']: + pytest.skip("159585: accuracy error on ARM") + self._test(*self.create_unary_net(input_shape, input_type, op_type), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_legacy_frontend=use_legacy_frontend, custom_eps=1e-3) From dac42a9301f237bd30052b32fa350fc4865b5d44 Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Thu, 19 Dec 2024 14:45:57 +0000 Subject: [PATCH 27/60] [CI] [GHA] Add retries and timeout 
for Fedora, use `dnf` (#27882) ### Details: - Should mitigate things like in https://github.com/openvinotoolkit/openvino/actions/runs/12081540979/job/33691617750 ### Tickets: - *158474* --------- Co-authored-by: Mikhail Ryzhov --- .github/dockerfiles/docker_tag | 2 +- .github/dockerfiles/ov_build/fedora_29/Dockerfile | 6 +++++- .github/dockerfiles/ov_test/fedora_33/Dockerfile | 6 +++++- .github/workflows/fedora_29.yml | 4 ++-- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.github/dockerfiles/docker_tag b/.github/dockerfiles/docker_tag index bcfa07fb5c24b3..1dc77e89521bfe 100644 --- a/.github/dockerfiles/docker_tag +++ b/.github/dockerfiles/docker_tag @@ -1 +1 @@ -pr-27597 +pr-27882 diff --git a/.github/dockerfiles/ov_build/fedora_29/Dockerfile b/.github/dockerfiles/ov_build/fedora_29/Dockerfile index e5f400e2915e9c..0b9911ac707b13 100644 --- a/.github/dockerfiles/ov_build/fedora_29/Dockerfile +++ b/.github/dockerfiles/ov_build/fedora_29/Dockerfile @@ -3,7 +3,11 @@ FROM ${REGISTRY}/library/fedora:29 USER root -RUN yum update -y && yum install -y \ +# dnf configuration +RUN echo "timeout=60" >> /etc/dnf/dnf.conf && \ + echo "retries=10" >> /etc/dnf/dnf.conf + +RUN dnf update -y && dnf install -y \ git \ curl \ python3 \ diff --git a/.github/dockerfiles/ov_test/fedora_33/Dockerfile b/.github/dockerfiles/ov_test/fedora_33/Dockerfile index 6e0fcc7d35156b..4c5b2037e60578 100644 --- a/.github/dockerfiles/ov_test/fedora_33/Dockerfile +++ b/.github/dockerfiles/ov_test/fedora_33/Dockerfile @@ -3,7 +3,11 @@ FROM ${REGISTRY}/library/fedora:33 USER root -RUN yum update -y && yum install -y \ +# dnf configuration +RUN echo "timeout=60" >> /etc/dnf/dnf.conf && \ + echo "retries=10" >> /etc/dnf/dnf.conf + +RUN dnf update -y && dnf install -y \ git \ curl \ python3 \ diff --git a/.github/workflows/fedora_29.yml b/.github/workflows/fedora_29.yml index 0dd101225dc533..6d128f33fca274 100644 --- a/.github/workflows/fedora_29.yml +++ b/.github/workflows/fedora_29.yml @@ 
-131,10 +131,10 @@ jobs: # install previous release version mv /tmp/openvino-2023.repo /etc/yum.repos.d - yum install -y openvino + dnf install -y openvino # install current version - yum install --allowerasing -y *.rpm + dnf install --allowerasing -y *.rpm working-directory: ${{ env.RPM_PACKAGES_DIR }} - name: Test RPM packages From e9400c5f1f451a686a9365027a024a7501670890 Mon Sep 17 00:00:00 2001 From: Gorokhov Dmitriy Date: Thu, 19 Dec 2024 19:20:04 +0400 Subject: [PATCH 28/60] [CPU] Enable compressed FC via oneDNN Matmul primitive (#27459) ### Details: - This PR enables execution FullyConnected operations via OneDNN Matmul Primitive - Matmul_weights_decompression tests are splitted on x64 and arm instances, ARM tests run well via ref matmul. - Newly added functionality is still under debug caps. To try it out: -- Build OV with: -DENABLE_DEBUG_CAPS=ON cmake option -- export OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC=1 --- src/plugins/intel_cpu/src/cpu_memory.cpp | 2 +- .../intel_cpu/src/dnnl_postops_composer.cpp | 68 +++++- .../intel_cpu/src/dnnl_postops_composer.h | 15 +- .../src/nodes/common/cpu_convert.cpp | 13 +- .../dnnl/dnnl_fullyconnected_primitive.cpp | 16 +- .../executors/dnnl/dnnl_matmul_primitive.cpp | 90 +++++--- .../executors/dnnl/dnnl_matmul_primitive.hpp | 2 + .../fullyconnected_implementations.cpp | 5 +- .../intel_cpu/src/nodes/fullyconnected.cpp | 40 +++- .../intel_cpu/src/nodes/fullyconnected.h | 2 + .../convert_to_cpu_specific_opset.hpp | 25 +-- .../aarch64/pass/snippets_mark_skipped.cpp | 16 ++ .../transformation_pipeline.cpp | 21 +- .../src/arm/matmul_weights_decompression.cpp | 86 ++++++++ .../classes/matmul_weights_decompression.cpp | 167 ++++++++++++++ .../classes/matmul_weights_decompression.hpp | 79 +++++++ .../src/x64/matmul_weights_decompression.cpp | 204 +----------------- .../skip_tests_config.cpp | 1 - 18 files changed, 574 insertions(+), 278 deletions(-) create mode 100644 
src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp diff --git a/src/plugins/intel_cpu/src/cpu_memory.cpp b/src/plugins/intel_cpu/src/cpu_memory.cpp index 7cb4abc2161f14..71851c529c6095 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.cpp +++ b/src/plugins/intel_cpu/src/cpu_memory.cpp @@ -45,7 +45,7 @@ void transferData(const IMemory& src, const IMemory& dst, bool ftz) { if (!ftz) { return; } - if (src.getDesc().getPrecision() != ov::element::f32 || dst.getDesc().getPrecision() == ov::element::bf16) { + if (src.getDesc().getPrecision() != ov::element::f32 || dst.getDesc().getPrecision() != ov::element::f32) { return; } size_t offset = 0; diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp index 9b86a1433acb06..be0c8a2a62d954 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp @@ -628,13 +628,71 @@ static MemoryPtr prepackDecompressionParams(const MemoryCPtr& paramsPtr, auto srcMem = std::make_shared(engine, srcMemoryDesc, paramsPtr->getData()); dstMem->load(*srcMem); - return dstMem; } +static dnnl::memory::dims getGroupDims(const VectorDims& weiDims, const VectorDims& scaleDims) { + if (scaleDims[0] == 1 && scaleDims[1] == 1) + return {}; + + int N = weiDims[weiDims.size() - 2]; + int K = weiDims[weiDims.size() - 1]; + dnnl::memory::dim groupN = N / scaleDims[0]; + dnnl::memory::dim groupK = K / scaleDims[1]; + + return {groupK, groupN}; +} + +static int getMask(const VectorDims& weiDims, const dnnl::memory::dims& groupDims) { + const int maskN = 1 << (weiDims.size() - 1); + const int maskK = 1 << (weiDims.size() - 2); + int N = 
weiDims[weiDims.size() - 2]; + int K = weiDims[weiDims.size() - 1]; + int mask = 0; + if (!groupDims.empty() && groupDims[1] != N) + mask += maskN; + if (!groupDims.empty() && groupDims[0] != K) + mask += maskK; + + return mask; +} + void DnnlPostOpsComposer::appendDecompressionScales(const MemoryCPtr& scales_ptr, bool needTranspose, - ov::element::Type dstPrecision) { + ov::element::Type dstPrecision, + const VectorDims& weiDims) { + if (scales_ptr == nullptr) + return; + + auto scaleMem = prepackDecompressionParams(scales_ptr, needTranspose, dstPrecision, engine); + auto groupDims = getGroupDims(weiDims, scaleMem->getStaticDims()); + auto mask = getMask(weiDims, groupDims); + + attr.set_scales(DNNL_ARG_WEIGHTS, mask, groupDims, DnnlExtensionUtils::ElementTypeToDataType(dstPrecision)); + cpuArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS] = std::move(scaleMem); + dnnlArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS] = + cpuArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS]->getPrimitive(); +} + +void DnnlPostOpsComposer::appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, + bool needTranspose, + ov::element::Type dstPrecision, + const VectorDims& weiDims) { + if (zero_points_ptr == nullptr) + return; + + auto zeroPointsMem = prepackDecompressionParams(zero_points_ptr, needTranspose, dstPrecision, engine); + auto groupDims = getGroupDims(weiDims, zeroPointsMem->getStaticDims()); + auto mask = getMask(weiDims, groupDims); + + attr.set_zero_points(DNNL_ARG_WEIGHTS, mask, groupDims, DnnlExtensionUtils::ElementTypeToDataType(dstPrecision)); + cpuArgs[DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS] = zeroPointsMem; + dnnlArgs[DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS] = zeroPointsMem->getPrimitive(); +} + +void DnnlPostOpsComposer::appendDecompressionScalesLegacy(const MemoryCPtr& scales_ptr, + bool needTranspose, + ov::element::Type dstPrecision) { if (scales_ptr == nullptr) return; @@ -647,9 +705,9 @@ void DnnlPostOpsComposer::appendDecompressionScales(const 
MemoryCPtr& scales_ptr cpuArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS]->getPrimitive(); } -void DnnlPostOpsComposer::appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, - bool needTranspose, - ov::element::Type dstPrecision) { +void DnnlPostOpsComposer::appendDecompressionZeroPointsLegacy(const MemoryCPtr& zero_points_ptr, + bool needTranspose, + ov::element::Type dstPrecision) { if (zero_points_ptr == nullptr) return; diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.h b/src/plugins/intel_cpu/src/dnnl_postops_composer.h index 7ae634658b005f..81fd1aaeed194d 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.h +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.h @@ -30,10 +30,21 @@ class DnnlPostOpsComposer { const MemoryArgs& memory, const dnnl::memory::data_type outDataType); DnnlPrimitiveAttrs compose(); - void appendDecompressionScales(const MemoryCPtr& scales_ptr, bool needTranspose, ov::element::Type dstPrecision); + + void appendDecompressionScales(const MemoryCPtr& scales_ptr, + bool needTranspose, + ov::element::Type dstPrecision, + const VectorDims& weiDims); void appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, bool needTranspose, - ov::element::Type dstPrecision); + ov::element::Type dstPrecision, + const VectorDims& weiDims); + void appendDecompressionScalesLegacy(const MemoryCPtr& scales_ptr, + bool needTranspose, + ov::element::Type dstPrecision); + void appendDecompressionZeroPointsLegacy(const MemoryCPtr& zero_points_ptr, + bool needTranspose, + ov::element::Type dstPrecision); void setDynamicQuantizationParams(uint64_t groupSize); private: diff --git a/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp b/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp index a0590827006eb4..0c8cddd905dc2e 100644 --- a/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp @@ -570,12 +570,13 @@ struct ConvertFromBinPrecision> { } }; -#define 
INTEL_CPU_CVT_FROM_4BIT_LIST \ - INTEL_CPU_CVT(u4, f32), INTEL_CPU_CVT(u4, bf16), INTEL_CPU_CVT(u4, f16), INTEL_CPU_CVT(u4, i8), \ - INTEL_CPU_CVT(u4, u8), INTEL_CPU_CVT(i4, f32), INTEL_CPU_CVT(i4, bf16), INTEL_CPU_CVT(i4, f16), \ - INTEL_CPU_CVT(i4, i8), INTEL_CPU_CVT(i4, u8), INTEL_CPU_CVT(nf4, f32), INTEL_CPU_CVT(nf4, bf16), \ - INTEL_CPU_CVT(nf4, f16), INTEL_CPU_CVT(nf4, i8), INTEL_CPU_CVT(nf4, u8), INTEL_CPU_CVT(f4e2m1, f32), \ - INTEL_CPU_CVT(f4e2m1, bf16), INTEL_CPU_CVT(f4e2m1, f16), INTEL_CPU_CVT(f4e2m1, i8), INTEL_CPU_CVT(f4e2m1, u8) +#define INTEL_CPU_CVT_FROM_4BIT_LIST \ + INTEL_CPU_CVT(u4, f32), INTEL_CPU_CVT(u4, i32), INTEL_CPU_CVT(u4, bf16), INTEL_CPU_CVT(u4, f16), \ + INTEL_CPU_CVT(u4, i8), INTEL_CPU_CVT(u4, u8), INTEL_CPU_CVT(i4, f32), INTEL_CPU_CVT(i4, i32), \ + INTEL_CPU_CVT(i4, bf16), INTEL_CPU_CVT(i4, f16), INTEL_CPU_CVT(i4, i8), INTEL_CPU_CVT(i4, u8), \ + INTEL_CPU_CVT(nf4, f32), INTEL_CPU_CVT(nf4, bf16), INTEL_CPU_CVT(nf4, f16), INTEL_CPU_CVT(nf4, i8), \ + INTEL_CPU_CVT(nf4, u8), INTEL_CPU_CVT(f4e2m1, f32), INTEL_CPU_CVT(f4e2m1, bf16), INTEL_CPU_CVT(f4e2m1, f16), \ + INTEL_CPU_CVT(f4e2m1, i8), INTEL_CPU_CVT(f4e2m1, u8) struct ConvertFrom4BitContext { ov::element::Type_t inType; diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp index 52434a1eeb8461..8ae2d2784193af 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp @@ -228,14 +228,16 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, if (dstPrc != f8e8m0 || useDynamicQuantization) dstPrc = ov::element::f32; - dnnlpoc.appendDecompressionScales(memory.at(ARG_WEI | ARG_ATTR_SCALES), !attrs.weightsNonTransposed, dstPrc); + dnnlpoc.appendDecompressionScalesLegacy(memory.at(ARG_WEI | ARG_ATTR_SCALES), + 
!attrs.weightsNonTransposed, + dstPrc); } if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS)) { auto dstPrc = useDynamicQuantization ? ov::element::u8 : ov::element::f32; - dnnlpoc.appendDecompressionZeroPoints(memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS), - !attrs.weightsNonTransposed, - dstPrc); + dnnlpoc.appendDecompressionZeroPointsLegacy(memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS), + !attrs.weightsNonTransposed, + dstPrc); } if (useDynamicQuantization) { @@ -247,9 +249,9 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, uint8_t zp_value = (wei_precision == ov::element::i8) ? 128 : 8; DnnlBlockedMemoryDesc zpMemoryDesc(ov::element::u8, Shape({1})); auto decompressionSubtractPtr = std::make_shared(context->getEngine(), zpMemoryDesc, &zp_value); - dnnlpoc.appendDecompressionZeroPoints(decompressionSubtractPtr, - !attrs.weightsNonTransposed, - ov::element::u8); + dnnlpoc.appendDecompressionZeroPointsLegacy(decompressionSubtractPtr, + !attrs.weightsNonTransposed, + ov::element::u8); } dnnlpoc.setDynamicQuantizationParams(attrs.dynamicQuantizationGroupSize); } diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp index 86b22607111833..9ffe4731689d43 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -76,6 +77,23 @@ bool DnnlMatMulPrimitive::Key::operator==(const Key& rhs) const { return result; } +template +static dimsType normalizeToRank(const dimsType& vec, size_t rank) { + if (vec.size() == rank || vec.empty()) + return vec; + + dimsType result; + result.reserve(rank); + + for (size_t i = vec.size(); i < rank; ++i) { + result.push_back(1); + } + + result.insert(result.end(), vec.begin(), vec.end()); + + return result; +} + std::shared_ptr 
DnnlMatMulPrimitive::create(const MemoryArgs& memory, const MatMulAttrs& attrs, const ExecutorContext::CPtr context, @@ -105,19 +123,22 @@ DnnlMemoryDescPtr DnnlMatMulPrimitive::makeTransposedWeightDescriptor(const Dnnl const auto& weiDesc = srcDesc->getDnnlDesc(); auto wDims = weiDesc.get_dims(); auto wDataType = weiDesc.get_data_type(); + std::swap(wDims[wDims.size() - 1], wDims[wDims.size() - 2]); dnnl::memory::dims wDims2D = reshapeDownToRank<2>(wDims); const auto format = weightsNonTransposed ? dnnl::memory::format_tag::ab : dnnl::memory::format_tag::ba; const auto transposedWeiDesc = dnnl::memory::desc{wDims2D, wDataType, format}; + const auto reshapedWeiDesc = transposedWeiDesc.reshape(dstDesc->getDnnlDesc().get_dims()); - return DnnlExtensionUtils::makeDescriptor(transposedWeiDesc); + return DnnlExtensionUtils::makeDescriptor(reshapedWeiDesc); } static DnnlPrimitiveAttrs createPrimitiveAttrs(const MatMulAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, ExecutorContext::CPtr context, - bool useDynamicQuantization) { + bool useWeightsDecompression, + bool weightsNonTransposed) { const auto& srcDesc = memory.at(ARG_SRC)->getDescPtr(); const auto& weiDesc = memory.at(ARG_WEI)->getDescPtr(); const auto& dstDesc = memory.at(ARG_DST)->getDescPtr(); @@ -132,7 +153,30 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const MatMulAttrs& attrs, DnnlPostOpsComposer dnnlpoc(postOps, context->getEngine(), dims, dims.size() - 1, isINT8, 1 << 0, memory, outputDataType); - return dnnlpoc.compose(); + const auto maxRank = + std::max({srcDesc->getShape().getRank(), weiDesc->getShape().getRank(), dstDesc->getShape().getRank()}); + const auto normWeiDims = normalizeToRank(weiDesc->getShape().getStaticDims(), maxRank); + if (memory.count(ARG_WEI | ARG_ATTR_SCALES)) { + auto dstPrc = ov::element::f32; + dnnlpoc.appendDecompressionScales(memory.at(ARG_WEI | ARG_ATTR_SCALES), + !weightsNonTransposed, + dstPrc, + normWeiDims); + } + if (memory.count(ARG_WEI | 
ARG_ATTR_ZERO_POINTS)) { + // TODO: clarify oneDNN requirements on ZP precision + auto zp = memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS); + auto zpPrc = zp->getPrecision(); + auto dstPrc = one_of(zpPrc, i32, i8, u8, i4, u4) ? zpPrc : i32; + dnnlpoc.appendDecompressionZeroPoints(zp, !weightsNonTransposed, dstPrc, normWeiDims); + } + + auto primAttrs = dnnlpoc.compose(); + if (useWeightsDecompression) { + primAttrs.attr.set_fpmath_mode(fpmath_mode::any, true); + } + + return primAttrs; } static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory::desc& inputDesc, @@ -143,22 +187,6 @@ static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory: const dnnl::engine& engine, const bool useSparseWeights, const bool useWeightsDecompression) { - auto normalizeToRank = [](const dnnl::memory::dims& vec, size_t rank) -> dnnl::memory::dims { - if (vec.size() == rank || vec.empty()) - return vec; - - dnnl::memory::dims result; - result.reserve(rank); - - for (size_t i = vec.size(); i < rank; ++i) { - result.push_back(1); - } - - result.insert(result.end(), vec.begin(), vec.end()); - - return result; - }; - auto weiDims = weightDesc.get_dims(); std::swap(weiDims[weiDims.size() - 1], weiDims[weiDims.size() - 2]); @@ -175,7 +203,9 @@ static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory: auto idt = inputDesc.get_data_type(); auto wdt = idt; - if (idt == dnnl::memory::data_type::u8 || idt == dnnl::memory::data_type::s8) { + if (useWeightsDecompression) { + wdt = weightDesc.get_data_type(); + } else if (idt == dnnl::memory::data_type::u8 || idt == dnnl::memory::data_type::s8) { wdt = memory::data_type::s8; } @@ -245,6 +275,16 @@ static VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDim return outputShape; } +bool DnnlMatMulPrimitive::useWeightsDecompressionImpl(const ov::element::Type inputType, + const ov::element::Type weightsType) { +#if defined(OPENVINO_ARCH_X86_64) + if 
(!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2)) + return false; +#endif + + return (one_of(inputType, f32, bf16, f16) && one_of(weightsType, u8, i8, u4, i4)); +} + DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, @@ -257,7 +297,9 @@ DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAt auto dstDesc = memory.at(ARG_DST)->getDescPtr(); MatMulAttrs mmAttrs{false, false}; - const auto postOpData = createPrimitiveAttrs(mmAttrs, postOps, memory, context, false); + const auto useWeightsDecompression = useWeightsDecompressionImpl(srcDesc->getPrecision(), weiDesc->getPrecision()); + const auto postOpData = + createPrimitiveAttrs(mmAttrs, postOps, memory, context, useWeightsDecompression, attrs.weightsNonTransposed); if (!cacheWeights) return std::make_shared(postOpData); @@ -285,7 +327,7 @@ DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAt context->getEngine(), context->getImplPriorities(), false, - false); + useWeightsDecompression); const auto weightsDesc = DnnlExtensionUtils::makeDescriptor(primDesc.weights_desc()); auto originalWeightsDesc = MemoryDescUtils::convertToDnnlMemoryDesc(weiDesc); @@ -319,7 +361,7 @@ DnnlMatMulPrimitive::DnnlMatMulPrimitive(const Key& key, engine, implPriorities, false, - false)), + useWeightsDecompressionImpl(key.src->getPrecision(), key.wei->getPrecision()))), m_implType(implTypeFromPrimDesc(m_primDesc)), m_srcDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.src_desc())), m_weiDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.weights_desc())), @@ -328,8 +370,6 @@ DnnlMatMulPrimitive::DnnlMatMulPrimitive(const Key& key, m_prim(primitive(m_primDesc)) {} void DnnlMatMulPrimitive::execute(const dnnl_primitive_args& primArgs) const { - std::cout << "Executing MM primitive" - << "\n"; m_prim.execute(m_stream, primArgs); } diff --git 
a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp index 618d3abdf8b3de..5491b62a154687 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp @@ -53,6 +53,8 @@ class DnnlMatMulPrimitive { return m_implType; } + static bool useWeightsDecompressionImpl(const ov::element::Type inputType, const ov::element::Type weightsType); + static DnnlShapeAgnosticDataPtr createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index bc55af8cfbb0e2..f2cf5a7c9102b7 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -133,6 +133,8 @@ static const TypeMapping dnnlMatMulTypeMapping { // quantization configuration {{_u8 | _i8, _i8, _u8|_i8|_i32|_bf16|_f16|_f32|_undefined, _u8|_i8|_i32|_bf16|_f16|_f32}, pt(bypass(), bypass(), bypass(), bypass())}, {{_u8 | _i8, _i8, _any, _any}, pt(bypass(), bypass(), just(), just())}, + // compresses int weights + {{_f32 | _bf16 | _f16, _u8 | _i8, _any, _any}, pt(bypass(), bypass(), use<0>(), use<0>())}, // @todo should we fallback to FPXX instead of _f32? 
{{_any, _any, _any, _any}, pt(just(), just(), just(), just())}, // @todo explicitly cover configuration limitations for oneDNN on ARM @@ -443,7 +445,7 @@ const std::vector>& getImplementations() { return std::make_shared(attrs, postOps, memory, context); } ) - OV_CPU_INSTANCE_X64( + OV_CPU_INSTANCE_DNNL( "matmul_dnnl", ExecutorType::Dnnl, OperationType::MatMul, @@ -454,7 +456,6 @@ const std::vector>& getImplementations() { CPU_DEBUG_CAP_ENABLE( if (getEnvBool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC")) { VERIFY(noSparseDecompression(config), UNSUPPORTED_SPARSE_WEIGHTS); - VERIFY(noWeightsDecompression(config), UNSUPPORTED_WEIGHTS_DECOMPRESSION); return true; }) return false; diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 2df6c0ae7522cc..4a2e3728887087 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -41,6 +41,42 @@ namespace ov { namespace intel_cpu { namespace node { +ov::element::TypeVector FullyConnected::getSupportedCompressedWeightsTypes() { + using ov::element::Type_t; + + bool useMatmulPrim = false; + CPU_DEBUG_CAP_ENABLE(useMatmulPrim = getEnvBool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC");) + + if (useMatmulPrim) { + return {Type_t::u8, Type_t::i8}; + } else { +#if defined(OPENVINO_ARCH_X86_64) + return {Type_t::u8, Type_t::i8, Type_t::u4, Type_t::i4, Type_t::nf4, Type_t::f4e2m1}; +#else + return {}; +#endif + } +} + +ov::element::TypeVector FullyConnected::getSupportedCompressedActivationsTypes() { + using ov::element::Type_t; + + bool useMatmulPrim = false; + CPU_DEBUG_CAP_ENABLE(useMatmulPrim = getEnvBool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC");) + + if (useMatmulPrim) { + return {Type_t::f32, Type_t::f16}; + } else { +#if defined(OPENVINO_ARCH_X86_64) + // @todo enable for bf16 as well + // after EnforceInferencePrecision is replaced with ConvertPrecision + return {Type_t::f32}; +#else + return {}; +#endif + } +} + bool 
FullyConnected::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { @@ -113,7 +149,9 @@ bool FullyConnected::isSupportedCompressedOperation(const std::shared_ptr& model, const C CPU_REGISTER_PASS_COMMON(manager, ConvertMatMulToFC); CPU_REGISTER_PASS_COMMON(manager, FullyConnectedBiasFusion); - std::vector supported_activation_types{ - // @todo enable for bf16 as well - // after EnforceInferencePrecision is replaced with ConvertPrecision - ov::element::f32, - }; - - std::vector supported_compressed_weights_types{ - ov::element::u8, - ov::element::i8, - ov::element::u4, - ov::element::i4, - ov::element::nf4, - ov::element::f4e2m1, - }; - - CPU_REGISTER_PASS_X64( + CPU_REGISTER_PASS_COMMON( manager, pass::ConvertFullyConnectedToFullyConnectedCompressed, - supported_activation_types, - supported_compressed_weights_types, + ov::intel_cpu::node::FullyConnected::getSupportedCompressedActivationsTypes(), + ov::intel_cpu::node::FullyConnected::getSupportedCompressedWeightsTypes(), [&config](const std::shared_ptr& fc, size_t IC, size_t OC, size_t G) { return ov::intel_cpu::node::FullyConnected::isSupportedCompressedOperation(fc, IC, @@ -65,8 +50,8 @@ inline void ConvertToCPUSpecificOpset(std::shared_ptr& model, const C }); CPU_REGISTER_PASS_X64(manager, pass::ConvertFCToFCQuantizedLegacy); - CPU_REGISTER_PASS_X64(manager, MoveFCReshapeToWeights); - CPU_REGISTER_PASS_X64(manager, ov::pass::Validate); + CPU_REGISTER_PASS_COMMON(manager, MoveFCReshapeToWeights); + CPU_REGISTER_PASS_COMMON(manager, ov::pass::Validate); CPU_REGISTER_PASS_COMMON(manager, AlignMatMulInputRanks); CPU_REGISTER_PASS_COMMON(manager, ConvertTileToSeqTiles); CPU_REGISTER_PASS_COMMON(manager, ConvertToPowerStatic); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp index 25b10d55ca8165..c567e7c38c2ef1 100644 --- 
a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp @@ -212,6 +212,13 @@ auto is_skipped_op(const std::shared_ptr& op) -> bool { return ov::is_type(op) || ov::is_type(op) || ov::is_type(op); } + +bool isSuitableMatMulWithConstantPath(const std::shared_ptr& node) { + return ov::is_type(node) && + !ov::is_type(node->get_input_node_shared_ptr(1)) && + ov::op::util::is_on_constant_path(node->input_value(1)); +} + } // namespace bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { @@ -220,6 +227,15 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { for (auto& node : m->get_ordered_ops()) { if (is_skipped_op(node)) continue; + // We perform this check separately because we mark here only weights path + // Matmul itself will be checked further + if (isSuitableMatMulWithConstantPath(node)) { + auto markup_func = [](Node* node) { + SetSnippetsNodeType(node->shared_from_this(), snippets::pass::SnippetsNodeType::SkippedByPlugin); + }; + std::unordered_set visited; + ov::op::util::visit_constant_path(node->get_input_node_ptr(1), visited, markup_func); + } if (isSuitableConvolutionParent(node)) { // Initiate fusing chain SetNodeFusingType(node, NodeFusingType::FusedWithConvolution); diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 469abbd99eb149..13e890f6339e81 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -337,19 +337,14 @@ void Transformations::PreLpt(const std::vector& defaultPrecis CPU_REGISTER_PASS_COMMON(decompression_handling_manager, ov::pass::MarkShapeOfSubgraphs); // We need to fuse Transpose to MatMul to have a simpler callback for the next transformation 
CPU_REGISTER_PASS_X64(decompression_handling_manager, ov::pass::TransposeMatMul); - ov::element::TypeVector decompression_precisions{ov::element::u8, - ov::element::i8, - ov::element::u4, - ov::element::i4, - ov::element::nf4, - ov::element::f4e2m1}; - - CPU_REGISTER_PASS_X64(decompression_handling_manager, - ov::pass::MarkDequantization, - decompression_precisions, - false, - true); - CPU_SET_CALLBACK_X64( + CPU_REGISTER_PASS_ARM(decompression_handling_manager, ov::pass::TransposeMatMul); + const auto& decompression_precisions = ov::intel_cpu::node::FullyConnected::getSupportedCompressedWeightsTypes(); + CPU_REGISTER_PASS_COMMON(decompression_handling_manager, + ov::pass::MarkDequantization, + decompression_precisions, + false, + true); + CPU_SET_CALLBACK_COMMON( decompression_handling_manager, [&](const_node_ptr& node) -> bool { return !is_decompression_multiply(node); diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp new file mode 100644 index 00000000000000..408dd40b4c658f --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp" + +#include "openvino/util/env_util.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +namespace { + +std::vector filter_additional_config_basic() { + return {{}, {ov::hint::inference_precision(ov::element::f16)}}; +} + +const std::vector decompression_precisions = {ov::element::f32}; +const std::vector weights_precisions = {ov::element::u8, ov::element::i8}; + +bool should_use_decompression_impl() { +#ifdef CPU_DEBUG_CAPS + return ov::util::getenv_bool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC"); +#else 
+ return false; +#endif +} + +const std::vector input_shapes = { + {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {16, 32}}, + {{{}, {{1, 8, 16}}}, {16, 32}, 4ul}, + {{{}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{}, {{5, 40, 96}}}, {1, 96, 240}}, + {{{}, {{1, 4, 48}}}, {48, 256}}, + {{{}, {{1, 11, 104}}}, {104, 77}, 104ul}, + {{{-1, -1, -1}, {{10, 40, 110}, {11, 40, 110}}}, {1, 110, 256}}, +}; +const std::vector fusing_params{emptyFusingSpec, fusingBias}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights, + MatmulWeightsDecompression, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(true), + ::testing::Values(DecompressionSubtractType::full), + ::testing::Values(false), + ::testing::ValuesIn(filter_additional_config_basic()), + ::testing::ValuesIn(fusing_params), + ::testing::Values(should_use_decompression_impl())), + MatmulWeightsDecompression::getTestCaseName); + +const std::vector input_shapes_corner_cases = { + {{{-1, -1, -1}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{-1, -1, -1}, {{1, 4, 16}}}, {16, 32}}, + {{{-1, -1, -1}, {{1, 5, 16}}}, {16, 32}, 4ul}, + {{{-1, -1, -1}, {{1, 1, 128}}}, {128, 128}, 16ul}, +}; + +const std::vector transpose_weights = {true, false}; +const std::vector decompression_subtract_type = {DecompressionSubtractType::full, + DecompressionSubtractType::scalar, + DecompressionSubtractType::empty}; +const std::vector reshape_on_decompression = {true, false}; +const std::vector decompression_precisions_corner_cases = {ov::element::f16, ov::element::f32}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases, + MatmulWeightsDecompression, + ::testing::Combine(::testing::ValuesIn(input_shapes_corner_cases), + ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions_corner_cases), + ::testing::Values(ov::element::undefined), + 
::testing::ValuesIn(transpose_weights), + ::testing::ValuesIn(decompression_subtract_type), + ::testing::ValuesIn(reshape_on_decompression), + ::testing::ValuesIn(filter_additional_config_basic()), + ::testing::Values(emptyFusingSpec), + ::testing::Values(should_use_decompression_impl())), + MatmulWeightsDecompression::getTestCaseName); + +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp new file mode 100644 index 00000000000000..e14245f2906e16 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp @@ -0,0 +1,167 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "matmul_weights_decompression.hpp" +#include "openvino/runtime/intel_cpu/properties.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +std::string MatmulWeightsDecompression::getTestCaseName(testing::TestParamInfo obj) { + MatMulDecompressionShapeParams shape_params; + ov::test::ElementType weights_precision; + ov::test::ElementType decompression_precision; + ov::test::ElementType scale_precision; + bool transpose; + DecompressionSubtractType decompression_subtract_type; + bool reshape_on_decompression; + ov::AnyMap additional_config; + fusingSpecificParams fusing_params; + bool should_fuse; + + std::tie(shape_params, + weights_precision, + decompression_precision, + scale_precision, + transpose, + decompression_subtract_type, + reshape_on_decompression, + additional_config, + fusing_params, + should_fuse) = obj.param; + + std::ostringstream result; + result << shape_params << "_"; + result << "weights_precision=" << weights_precision << "_"; + result << "decompression_precision=" << decompression_precision << "_"; + result << 
"scale_precision=" << scale_precision << "_"; + result << "transpose_weights=" << transpose << "_"; + result << "decompression_subtract=" << decompression_subtract_type << "_"; + result << "reshape_on_decompression=" << reshape_on_decompression << "_"; + + result << "config=("; + for (const auto& configEntry : additional_config) { + result << configEntry.first << ", " << configEntry.second.as() << "_"; + } + result << ")"; + result << CpuTestWithFusing::getTestCaseName(fusing_params); + + return result.str(); +} + +std::shared_ptr MatmulWeightsDecompression::initSubgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const ov::element::Type decompression_precision, + const ov::element::Type scale_precision, + const bool transpose_weights, + const DecompressionSubtractType decompression_subtract_type, + const bool reshape_on_decompression) { + ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; + const auto weights_subgraph = initMatMulDecompressionSubgraph(weights_shape, + group_size, + data_precision, + weights_precision, + decompression_precision, + scale_precision, + transpose_weights, + decompression_subtract_type, + reshape_on_decompression); + auto matMul = std::make_shared(params[0], weights_subgraph); + return makeNgraphFunction(data_precision, params, matMul, "MatmulWeightsDecompression"); +} + +void MatmulWeightsDecompression::SetUp() { + targetDevice = ov::test::utils::DEVICE_CPU; + + MatMulDecompressionShapeParams shape_params; + ov::test::ElementType weights_precision; + ov::test::ElementType decompression_precision; + ov::test::ElementType scale_precision; + bool transpose_weights; + DecompressionSubtractType decompression_subtract_type; + bool reshape_on_decompression; + ov::AnyMap additional_config; + fusingSpecificParams fusing_params; + bool should_fuse; + + std::tie(shape_params, + 
weights_precision, + decompression_precision, + scale_precision, + transpose_weights, + decompression_subtract_type, + reshape_on_decompression, + additional_config, + fusing_params, + should_fuse) = GetParam(); + + configuration.insert(additional_config.begin(), additional_config.end()); + std::tie(postOpMgrPtr, fusedOps) = fusing_params; + init_input_shapes({shape_params.data_shape}); + + if (!configuration.count(ov::hint::dynamic_quantization_group_size.name())) { + abs_threshold = 5e-3; + } + + // if dynamic quantization is enabled + if (configuration.count(ov::hint::dynamic_quantization_group_size.name()) && + configuration.at(ov::hint::dynamic_quantization_group_size.name()) != 0) { + abs_threshold = 0.1; + } + + if (configuration.count(ov::hint::inference_precision.name()) && + configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) { + abs_threshold = 0.2; + } + + ElementType netType = ov::element::f32; + inType = outType = netType; + + function = initSubgraph(inputDynamicShapes[0], + shape_params.weights_shape, + shape_params.decompression_group_size, + netType, + weights_precision, + decompression_precision, + scale_precision, + transpose_weights, + decompression_subtract_type, + reshape_on_decompression); +} + +void MatmulWeightsDecompression::check_results() { + const auto& test_param = GetParam(); + const ov::element::Type compressed_weights_precision = std::get<1>(test_param); + const bool use_matmul_decompression_impl = std::get<9>(test_param); + + const auto runtime_model = compiledModel.get_runtime_model(); + const auto result = runtime_model->get_result(); + auto fc = result->get_input_node_shared_ptr(0); + // Handle precision conversion before output + auto type = fc->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); + if (type == "Reorder" || type == "Convert" || type == "Subgraph") + fc = fc->get_input_node_shared_ptr(0); + + type = fc->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); + EXPECT_EQ(type, 
"FullyConnected"); + + const auto& expected_weights_precision = use_matmul_decompression_impl + ? compressed_weights_precision + : fc->get_input_element_type(0); + EXPECT_EQ(fc->get_input_element_type(1), expected_weights_precision); +} + +TEST_P(MatmulWeightsDecompression, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + run(); + check_results(); +} + +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp new file mode 100644 index 00000000000000..266aab8e445928 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/subgraph/weights_decompression_builders.hpp" +#include "utils/cpu_test_utils.hpp" +#include "utils/fusing_test_utils.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +/* + * WP - weights precision + * DP - decompression precision + * IP - input precision + * SP - scale precision + * Opt - optional + * Subtract_const(WP) + * / + * Weights(WP) Convert(DP) + * | / Multiply_const(SP) + * Convert(DP) Reshape (Opt) / + * \ / Convert(if SP != DP) + * Subtract(Opt) / + * \ Reshape (Opt) + * \ / + * Multiply + * | + * Reshape (in case of group decompression) + * | + * Convert (if IP != DP) + * | + * Data(IP) Transpose(Opt) + * \ / + * Matmul + * | + * Bias + */ +typedef std::tuple // should use decompression implementation + MatmulWeightsDecompressionParams; + +class MatmulWeightsDecompression : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CpuTestWithFusing { +public: + 
static std::string getTestCaseName(testing::TestParamInfo obj); + +protected: + std::shared_ptr initSubgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const ov::element::Type decompression_precision, + const ov::element::Type scale_precision, + const bool transpose_weights, + const DecompressionSubtractType decompression_subtract_type, + const bool reshape_on_decompression); + + void SetUp() override; + + void check_results(); +}; + +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp index 9a434943893eed..5a5a375566b955 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp @@ -1,200 +1,13 @@ -// Copyright (C) 2023-2024 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "utils/fusing_test_utils.hpp" -#include "openvino/runtime/intel_cpu/properties.hpp" -#include "shared_test_classes/subgraph/weights_decompression_builders.hpp" +#include "custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp" using namespace CPUTestUtils; namespace ov { namespace test { -/* - * WP - weights precision - * DP - decompression precision - * IP - input precision - * SP - scale precision - * Opt - optional - * Subtract_const(WP) - * / - * Weights(WP) Convert(DP) - * | / Multiply_const(SP) - * Convert(DP) Reshape (Opt) / - * \ / Convert(if SP != DP) - * Subtract(Opt) / - * \ Reshape (Opt) - * \ / - * Multiply - * | - * Reshape (in case of group 
decompression) - * | - * Convert (if IP != DP) - * | - * Data(IP) Transpose(Opt) - * \ / - * Matmul - * | - * Bias - */ -using MatmulWeightsDecompressionParams = std::tuple; // should use decompression implementation - -class MatmulWeightsDecompression : public testing::WithParamInterface, - virtual public SubgraphBaseTest, - public CpuTestWithFusing { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - MatMulDecompressionShapeParams shape_params; - ov::test::ElementType weights_precision; - ov::test::ElementType decompression_precision; - ov::test::ElementType scale_precision; - bool transpose; - DecompressionSubtractType decompression_subtract_type; - bool reshape_on_decompression; - ov::AnyMap additional_config; - fusingSpecificParams fusing_params; - bool should_fuse; - - std::tie(shape_params, - weights_precision, - decompression_precision, - scale_precision, - transpose, - decompression_subtract_type, - reshape_on_decompression, - additional_config, - fusing_params, - should_fuse) = obj.param; - - std::ostringstream result; - result << shape_params << "_"; - result << "weights_precision=" << weights_precision << "_"; - result << "decompression_precision=" << decompression_precision << "_"; - result << "scale_precision=" << scale_precision << "_"; - result << "transpose_weights=" << transpose << "_"; - result << "decompression_subtract=" << decompression_subtract_type << "_"; - result << "reshape_on_decompression=" << reshape_on_decompression << "_"; - - result << "config=("; - for (const auto& configEntry : additional_config) { - result << configEntry.first << ", " << configEntry.second.as() << "_"; - } - result << ")"; - result << CpuTestWithFusing::getTestCaseName(fusing_params); - - return result.str(); - } - -protected: - std::shared_ptr initSubgraph(const ov::PartialShape& data_shape, - const ov::Shape& weights_shape, - const int group_size, - const ov::element::Type data_precision, - const ov::element::Type weights_precision, - 
const ov::element::Type decompression_precision, - const ov::element::Type scale_precision, - const bool transpose_weights, - const DecompressionSubtractType decompression_subtract_type, - const bool reshape_on_decompression) { - ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; - const auto weights_subgraph = initMatMulDecompressionSubgraph(weights_shape, - group_size, - data_precision, - weights_precision, - decompression_precision, - scale_precision, - transpose_weights, - decompression_subtract_type, - reshape_on_decompression); - auto matMul = std::make_shared(params[0], weights_subgraph); - return makeNgraphFunction(data_precision, params, matMul, "MatmulWeightsDecompression"); - } - - void SetUp() override { - targetDevice = ov::test::utils::DEVICE_CPU; - - MatMulDecompressionShapeParams shape_params; - ov::test::ElementType weights_precision; - ov::test::ElementType decompression_precision; - ov::test::ElementType scale_precision; - bool transpose_weights; - DecompressionSubtractType decompression_subtract_type; - bool reshape_on_decompression; - ov::AnyMap additional_config; - fusingSpecificParams fusing_params; - bool should_fuse; - - std::tie(shape_params, - weights_precision, - decompression_precision, - scale_precision, - transpose_weights, - decompression_subtract_type, - reshape_on_decompression, - additional_config, - fusing_params, - should_fuse) = GetParam(); - - configuration.insert(additional_config.begin(), additional_config.end()); - std::tie(postOpMgrPtr, fusedOps) = fusing_params; - init_input_shapes({shape_params.data_shape}); - - // if dynamic quantization is enabled - if (configuration.count(ov::hint::dynamic_quantization_group_size.name()) && - configuration.at(ov::hint::dynamic_quantization_group_size.name()) != 0) { - abs_threshold = 0.1; - } else if (!configuration.count(ov::hint::dynamic_quantization_group_size.name())) { - abs_threshold = 5e-3; - } - - ElementType netType = ov::element::f32; - inType = 
outType = netType; - - function = initSubgraph(inputDynamicShapes[0], - shape_params.weights_shape, - shape_params.decompression_group_size, - netType, - weights_precision, - decompression_precision, - scale_precision, - transpose_weights, - decompression_subtract_type, - reshape_on_decompression); - } - - void check_results() { - const auto& test_param = GetParam(); - const ov::element::Type compressed_weights_precision = std::get<1>(test_param); - const bool use_matmul_decompression_impl = std::get<9>(test_param); - - const auto runtime_model = compiledModel.get_runtime_model(); - const auto result = runtime_model->get_result(); - const auto fc = result->get_input_node_shared_ptr(0); - const auto type = fc->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); - EXPECT_EQ(type, "FullyConnected"); - - const auto& expected_weights_precision = use_matmul_decompression_impl - ? compressed_weights_precision - : fc->get_input_element_type(0); - EXPECT_EQ(fc->get_input_element_type(1), expected_weights_precision); - } -}; - -TEST_P(MatmulWeightsDecompression, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - run(); - check_results(); -} namespace { @@ -205,7 +18,8 @@ std::vector filter_additional_config_basic() { std::vector filter_additional_config_amx() { std::vector additional_config = {}; if (ov::with_cpu_x86_avx512_core_amx()) - additional_config.push_back({{ov::hint::dynamic_quantization_group_size(0), ov::hint::inference_precision(ov::element::bf16)}}); + additional_config.push_back( + {{ov::hint::dynamic_quantization_group_size(0), ov::hint::inference_precision(ov::element::bf16)}}); return additional_config; } @@ -310,8 +124,9 @@ const std::vector input_shapes_corner_cases_amx }; const std::vector transpose_weights = {true, false}; -const std::vector decompression_subtract_type = { - DecompressionSubtractType::full, DecompressionSubtractType::scalar, DecompressionSubtractType::empty}; +const std::vector decompression_subtract_type = 
{DecompressionSubtractType::full, + DecompressionSubtractType::scalar, + DecompressionSubtractType::empty}; const std::vector reshape_on_decompression = {true, false}; const std::vector decompression_precisions_corner_cases = {ov::element::f16, ov::element::f32}; @@ -387,12 +202,11 @@ const std::vector input_shapes_basic_dyn_quant = {{{}, {{1, 1, 1728}}}, {1728, 128}, 64lu}, }; -const std::vector weights_precisions_dyn_quant = {ov::element::u8, - ov::element::u4}; +const std::vector weights_precisions_dyn_quant = {ov::element::u8, ov::element::u4}; std::vector filter_additional_config_dyn_quant() { std::vector additional_config = { - {{ov::hint::dynamic_quantization_group_size(0)}}, // dynamic quantization is disabled + {{ov::hint::dynamic_quantization_group_size(0)}}, // dynamic quantization is disabled {{ov::hint::dynamic_quantization_group_size(16)}}, {{ov::hint::dynamic_quantization_group_size(128)}}, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 4c21c06c491179..7af707df602bfc 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -481,7 +481,6 @@ std::vector disabledTestPatterns() { // Issue 88764, 91647, 108802: accuracy issue retVector.emplace_back(R"(MultipleLSTMCellTest/MultipleLSTMCellTest.CompareWithRefs.*)"); // Compressed weights are not supported - retVector.emplace_back(R"(smoke_MatMulCompressedWeights.*)"); retVector.emplace_back(R"(smoke_MatMulSharedCompressedWeights.*)"); retVector.emplace_back(R"(smoke_MatmulAndGatherSharedWeightsDecompression.*)"); // smoke_Snippets test cases are not supported on arm32 platforms From 83a55dfb79e6410af835cbca6685283037c254bc Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Thu, 19 Dec 2024 17:46:58 +0100 Subject: [PATCH 29/60] 
[RTTI] Apply OPENVINO_MATCHER_PASS_RTTI in Plugins (#28124) ### Details: - Applied OPENVINO_MATCHER_PASS_RTTI helper macro in Plugins. ### Tickets: - CVS-159567 --------- Signed-off-by: Tomasz Jankowski Co-authored-by: Michal Lukaszewski --- .../cpu_opset/arm/pass/convert_group_conv.hpp | 2 +- .../arm/pass/convert_group_conv1d.hpp | 4 ++-- .../arm/pass/convert_reduce_multi_axis.hpp | 2 +- .../arm/pass/convert_reduce_no_keep_dims.hpp | 2 +- .../cpu_opset/arm/pass/mish_decomposition.hpp | 2 +- .../common/pass/align_matmul_input_ranks.hpp | 2 +- .../pass/causal_mask_preprocess_fusion.cpp | 2 +- .../pass/convert_broadcast_to_tiles.hpp | 2 +- .../pass/convert_fq_rnn_to_quantized_rnn.hpp | 2 +- .../common/pass/convert_matmul_to_fc.hpp | 2 +- .../common/pass/convert_tile_to_seq_tiles.hpp | 2 +- .../common/pass/convert_to_leaky_relu.hpp | 2 +- .../common/pass/convert_to_power_static.hpp | 2 +- .../common/pass/convert_to_swish_cpu.hpp | 2 +- .../common/pass/decompose_integer_divide.hpp | 2 +- .../common/pass/decompose_rms_norm.hpp | 2 +- .../cpu_opset/common/pass/fc_bias_fusion.hpp | 2 +- .../pass/insert_convert_after_extension.hpp | 2 +- .../pass/move_fc_reshape_to_weights.hpp | 2 +- .../cpu_opset/common/pass/ngram_fusion.hpp | 2 +- .../pass/permute_slice_n_interpolation.hpp | 2 +- .../pass/rnn_sequences_optimization.hpp | 6 +++--- .../common/pass/stateful_sdpa_fusion.hpp | 2 +- .../common/pass/swap_convert_transpose.hpp | 1 + .../x64/pass/convert_to_interaction.hpp | 6 +++--- .../cpu_opset/x64/pass/mha_fusion.hpp | 3 +++ .../cpu_opset/x64/pass/mlp_fusion.hpp | 4 ++-- .../cpu_opset/x64/pass/qkv_proj_fusion.hpp | 6 +++--- .../x64/pass/sdpa_fuse_transpose_reshape.hpp | 2 +- .../snippets/common/pass/mul_add_to_fma.hpp | 1 + .../x64/pass/brgemm_to_brgemm_cpu.hpp | 2 +- .../x64/pass/eliminate_brgemm_copy_b.hpp | 2 +- .../snippets/x64/pass/remove_converts.hpp | 2 +- .../tpp/x64/pass/brgemm_to_brgemm_tpp.hpp | 2 +- .../tpp/x64/pass/eltwise_to_eltwise_tpp.hpp | 2 +- 
.../tpp/x64/pass/scalar_to_scalar_tpp.hpp | 2 +- .../bcast_and_pad_zp_buffers.hpp | 2 +- .../transformations/binary_conv_to_conv.hpp | 2 +- .../transformations/clamp_fp16_output.hpp | 2 +- .../transformations/convert_convolution.cpp | 4 ++-- .../convert_fc_to_compressed.hpp | 2 +- .../transformations/convert_matmul_to_fc.hpp | 2 +- .../convert_pooling_to_reduce.hpp | 2 +- .../transformations/convert_shapeof.hpp | 2 +- ...convert_stridedslices_to_variadicsplit.hpp | 2 +- .../decompose_reduce_for_false_keepdims.hpp | 2 ++ .../decompose_reduce_scalar_output.hpp | 2 +- .../dynamic_quantize_fully_connected.hpp | 2 +- .../transformations/einsum_decomposition.hpp | 1 + .../transformations/fc_convert_fusion.hpp | 2 +- .../transformations/fc_horizontal_fusion.hpp | 2 +- .../transformations/fc_per_layer_scaling.hpp | 2 +- .../group_norm_composition.hpp | 2 +- .../increase_position_ids_precision.hpp | 2 +- .../transformations/indirect_kv_cache.hpp | 4 ++-- .../transformations/kv_cache_compression.cpp | 2 +- .../transformations/kv_cache_fusion.cpp | 2 +- .../move_fc_reshape_to_weights.hpp | 2 +- .../optimize_subsequent_reshapes.hpp | 2 +- .../transformations/transpose_fusion.hpp | 6 +++--- ...queeze_broadcast_reshape_matmul_fusion.hpp | 2 +- ...nsqueeze_broadcast_reshape_sdpa_fusion.hpp | 2 +- .../npuw/partitioning/patterns/avoid.hpp | 1 + .../npuw/partitioning/patterns/compute.hpp | 8 +++++++ .../npuw/partitioning/patterns/dcoff.hpp | 16 ++++++++++++++ .../plugin/npuw/partitioning/patterns/opt.hpp | 21 +++++++++++++++++++ 66 files changed, 123 insertions(+), 69 deletions(-) diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp index 55c1ecde2aae10..3e9540f35f3273 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp @@ 
-61,7 +61,7 @@ namespace intel_cpu { class ConvertGroupConvolution : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGroupConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGroupConvolution"); ConvertGroupConvolution(); }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp index 5674514eeb8e64..7898833529294b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp @@ -54,7 +54,7 @@ namespace ov { namespace intel_cpu { class ConvertConv1DBase : public ov::pass::MatcherPass { protected: - OPENVINO_RTTI("ConvertConv1DBase", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertConv1DBase"); template ov::matcher_pass_callback convert_conv1d_to_conv2d(); }; @@ -71,4 +71,4 @@ class ConvertGroupConv1D : public ConvertConv1DBase { ConvertGroupConv1D(); }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp index 8e5fd1e38b605a..947d7ee476bc81 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp @@ -51,7 +51,7 @@ namespace intel_cpu { class ConvertReduceMultiAxisBase : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertReduceMultiAxisBase", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertReduceMultiAxisBase"); template ov::matcher_pass_callback convert_reduce(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp index ea4128ea265e42..6cc683154cc175 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp @@ -45,7 +45,7 @@ namespace intel_cpu { class ConvertReduceNoKeepDimsBase : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertReduceNoKeepDims", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertReduceNoKeepDimsBase"); template ov::matcher_pass_callback convert_reduce(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp index 75b45dca468dc7..07384a8f1e24ca 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class MishDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MishDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("MishDecomposition"); MishDecomposition(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp index f9ab862b19f4dd..9b9762ee5e525b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp @@ -19,7 +19,7 @@ namespace intel_cpu { class AlignMatMulInputRanks : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AlignMatMulInputRanks", "0"); + OPENVINO_MATCHER_PASS_RTTI("AlignMatMulInputRanks"); AlignMatMulInputRanks(); }; diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp index 5801dbb8ae74a9..5f3058429a8497 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp @@ -24,7 +24,7 @@ using namespace ov::gen_pattern; class CausalMaskPreprocess : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CausalMaskPreprocess", "0"); + OPENVINO_MATCHER_PASS_RTTI("CausalMaskPreprocess"); CausalMaskPreprocess(); private: diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp index b0b1e5632f908a..e1b3307a13bdc4 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertBroadcastToTiles : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBroadcastToTiles", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBroadcastToTiles"); ConvertBroadcastToTiles(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp index ee3692f2ea4ca6..a51a357197ef26 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp @@ -76,7 +76,7 @@ namespace intel_cpu { class ConvertFqRnnToQuantizedRnn : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("ConvertFqRnnToQuantizedRnn", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertFqRnnToQuantizedRnn"); ConvertFqRnnToQuantizedRnn(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp index 03366161f8c904..cf628a3497f3a5 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertMatMulToFC : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMatMulToFC", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMatMulToFC"); ConvertMatMulToFC(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp index 5a4928477abec9..3760f2e000aee2 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertTileToSeqTiles : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTileToSeqTiles", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTileToSeqTiles"); ConvertTileToSeqTiles(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp index b09f14de5793c5..40d4ed2907dbba 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertToLeakyRelu : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToLeakyRelu", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToLeakyRelu"); ConvertToLeakyRelu(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp index d34a4c3667b5aa..3797eb780b3d13 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertToPowerStatic : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToPowerStatic", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToPowerStatic"); ConvertToPowerStatic(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp index 3fe3569a13e745..3a640410db472b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertToSwishCPU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToSwishCPU", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToSwishCPU"); ConvertToSwishCPU(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp index 329febfcd21e92..ac7e2c2e72f68a 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class DecomposeIntegerDivide : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("DecomposeIntegerDivide", "0"); + OPENVINO_MATCHER_PASS_RTTI("DecomposeIntegerDivide"); DecomposeIntegerDivide(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp index f11a3afa23d20c..4cc58841a77bb7 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class DecomposeRMSNorm : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DecomposeRMSNorm", "0"); + OPENVINO_MATCHER_PASS_RTTI("DecomposeRMSNorm"); DecomposeRMSNorm(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp index d1e64969fbcf4e..5fadd183dfd694 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class FullyConnectedBiasFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FullyConnectedBiasFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("FullyConnectedBiasFusion"); FullyConnectedBiasFusion(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp index 49cfab68bc6911..7c52d3614407fd 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp @@ -14,7 +14,7 @@ namespace pass { class InsertConvertAfterExtension : 
public ov::pass::MatcherPass { public: - OPENVINO_RTTI("InsertConvertAfterExtension", "0"); + OPENVINO_MATCHER_PASS_RTTI("InsertConvertAfterExtension"); InsertConvertAfterExtension(bool convert_output_precision = true); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp index 0af0469ce0135e..d3869ff7509fae 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp @@ -25,7 +25,7 @@ namespace intel_cpu { */ class MoveFCReshapeToWeights : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveFCReshapeToWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveFCReshapeToWeights"); MoveFCReshapeToWeights(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp index a90487391b32ac..7a3aa4b3432318 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class NgramFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NgramFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("NgramFusion"); NgramFusion(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp index bbca2449a473af..369ef8ccab92e3 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp @@ -14,7 +14,7 @@ 
namespace intel_cpu { class PermuteSliceAndInterpolation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PermuteSliceAndInterpolation", "0"); + OPENVINO_MATCHER_PASS_RTTI("PermuteSliceAndInterpolation"); PermuteSliceAndInterpolation(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp index d7bfd28f0db80d..a63a3dce8219c2 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp @@ -11,19 +11,19 @@ namespace intel_cpu { class OptimizeGRUSequenceTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeGRUSequenceTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeGRUSequenceTransposes"); OptimizeGRUSequenceTransposes(); }; class OptimizeLSTMSequenceTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeLSTMSequenceTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeLSTMSequenceTransposes"); OptimizeLSTMSequenceTransposes(); }; class OptimizeRNNSequenceTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeRNNSequenceTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeRNNSequenceTransposes"); OptimizeRNNSequenceTransposes(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp index bc000eb0485cd2..96028402aa9f92 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { class StatefulSDPAFusion : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("StatefulSDPAFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("StatefulSDPAFusion"); StatefulSDPAFusion(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp index 747ca3482eaf0c..7c53d73104fc1d 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp @@ -11,6 +11,7 @@ namespace intel_cpu { class SwapConvertTranspose : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("SwapConvertTranspose"); SwapConvertTranspose(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp index 9fee162128f3ab..d9ed8e298d18fe 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp @@ -11,19 +11,19 @@ namespace intel_cpu { class ConvertToInteraction : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToInteraction", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToInteraction"); ConvertToInteraction(); }; class FuseFQtoInteraction : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseFQtoInteraction", "0"); + OPENVINO_MATCHER_PASS_RTTI("FuseFQtoInteraction"); FuseFQtoInteraction(); }; class ConvertInteractionInt8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertInteractionInt8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertInteractionInt8"); ConvertInteractionInt8(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp index 
9273220c4a0a0c..fe4f4ccae04f1c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp @@ -11,6 +11,9 @@ namespace ov { namespace intel_cpu { class MHAFusionBase : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("MHAFusionBase"); + protected: bool valid_transpose_order(const std::shared_ptr& node, const std::vector& expected_order) { if (auto transpose_pattern = ov::as_type_ptr(node)) { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp index 5754d0fa9b622f..139aaaa488a1cb 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp @@ -11,9 +11,9 @@ namespace intel_cpu { class MLPFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MLPFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MLPFusion"); MLPFusion(); }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp index bfaf42f1f0acd3..e5b1743a68f7e6 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp @@ -11,15 +11,15 @@ namespace intel_cpu { class QKVProjFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("QKVProjFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("QKVProjFusion"); QKVProjFusion(); }; class QKVProjFusion2 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("QKVProjFusion2", "0"); + OPENVINO_MATCHER_PASS_RTTI("QKVProjFusion2"); QKVProjFusion2(); }; } // namespace intel_cpu -} // namespace 
ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp index d2bb84893c0728..ebba0a3ec9e185 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { class SDPAFuseTransposeReshape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SDPAFuseTransposeReshape", "0"); + OPENVINO_MATCHER_PASS_RTTI("SDPAFuseTransposeReshape"); SDPAFuseTransposeReshape(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp b/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp index 57cf48283e4552..b171e861b23cba 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp @@ -17,6 +17,7 @@ namespace pass { */ class MulAddToFMA : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("MulAddToFMA"); MulAddToFMA(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp index 9475171b24f65d..245f83c13c3466 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp @@ -34,7 +34,7 @@ namespace pass { */ class BrgemmToBrgemmCPU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BrgemmToBrgemmCPU", "0"); + OPENVINO_MATCHER_PASS_RTTI("BrgemmToBrgemmCPU"); BrgemmToBrgemmCPU(); }; diff --git 
a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp index 89815eb1d6ffbf..c330bc9c922381 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp @@ -20,7 +20,7 @@ namespace pass { */ class EliminateBrgemmCopyB : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateBrgemmCopyB", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateBrgemmCopyB"); EliminateBrgemmCopyB(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp index 000321a8918ccd..2c965871840572 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp @@ -17,7 +17,7 @@ namespace pass { */ class RemoveConverts : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RemoveConverts", "0"); + OPENVINO_MATCHER_PASS_RTTI("RemoveConverts"); RemoveConverts(); }; diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp index 0fb26f6df1d6b2..2b73104d1e1335 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp @@ -18,7 +18,7 @@ namespace pass { */ class BrgemmToBrgemmTPP: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BrgemmToBrgemmTPP", "0"); + OPENVINO_MATCHER_PASS_RTTI("BrgemmToBrgemmTPP"); BrgemmToBrgemmTPP(); static bool is_supported_brgemm_configuration(const std::vector>& layouts, diff --git 
a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp index 189d048e86ffd7..f0bdab120c3498 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp @@ -18,7 +18,7 @@ namespace pass { */ class EltwiseToEltwiseTPP: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EltwiseToEltwiseTPP", "0"); + OPENVINO_MATCHER_PASS_RTTI("EltwiseToEltwiseTPP"); EltwiseToEltwiseTPP(); }; diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp index 8bbfcf80c751ac..a56e23363067e2 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ScalarToScalarTPP: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ScalarToScalarTPP", "0"); + OPENVINO_MATCHER_PASS_RTTI("ScalarToScalarTPP"); ScalarToScalarTPP(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp b/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp index 1a869b8afbddf2..1fab1692d97cbe 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp @@ -13,7 +13,7 @@ namespace intel_gpu { // and adds optional padding to align elements count to `alignment` value class BroadcastAndPadZeroPointBuffers : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BroadcastAndPadZeroPointBuffers", "0"); + OPENVINO_MATCHER_PASS_RTTI("BroadcastAndPadZeroPointBuffers"); BroadcastAndPadZeroPointBuffers(size_t alignment = 1, bool supports_immad 
= false); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp b/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp index 90d62a8e7a1fa5..bf372ae19c1553 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class ConvertBinaryConvolutionToConvolution: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBinaryConvolutionToConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBinaryConvolutionToConvolution"); ConvertBinaryConvolutionToConvolution(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp index ac93d446ee749d..bea51c2e768576 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp @@ -18,7 +18,7 @@ namespace intel_gpu { */ class ClampFP16Output: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::intel_gpu::ClampFP16Output"); + OPENVINO_MATCHER_PASS_RTTI("ov::intel_gpu::ClampFP16Output"); ClampFP16Output(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp index 656b4c6fd99c20..0d23eb49829217 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp @@ -102,13 +102,13 @@ ov::Tensor get_compensation(std::shared_ptr w, std::shared_ptr class KVCacheCompressionMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("KVCacheCompressionMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("KVCacheCompressionMatcher"); KVCacheCompressionMatcher(ov::element::Type compression_dt); }; diff --git 
a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp index 8be42a1311094b..f22b32b23ea407 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp @@ -30,7 +30,7 @@ namespace intel_gpu { class KVCacheFusionMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("KVCacheFusionMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("KVCacheFusionMatcher"); KVCacheFusionMatcher(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp b/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp index f573abd4589e8f..7237e5c97ace70 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp @@ -30,7 +30,7 @@ namespace intel_gpu { */ class MoveFCReshapeToWeights: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveFCReshapeToWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveFCReshapeToWeights"); MoveFCReshapeToWeights(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp b/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp index 3a38bb92ad5167..702b18e7fc5dc9 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp @@ -15,7 +15,7 @@ namespace intel_gpu { */ class OptimizeSubsequentReshapes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeSubsequentReshapes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeSubsequentReshapes"); OptimizeSubsequentReshapes(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp 
b/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp index 4869100054b819..a845c7a7aa86b0 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp @@ -17,19 +17,19 @@ class TransposeFusion: public ov::pass::GraphRewrite { class TransposeMatMulMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeMatMulMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeMatMulMatcher"); TransposeMatMulMatcher(bool supports_immad); }; class TransposeMatMulTransposeMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeMatMulTransposeMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeMatMulTransposeMatcher"); TransposeMatMulTransposeMatcher(bool supports_immad); }; class TransposeSDPAMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeSDPAMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeSDPAMatcher"); TransposeSDPAMatcher(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp index 35ed30cdc9726e..3e5926561820b8 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class UnsqueezeBroadcastReshapeMatmulFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UnsqueezeBroadcastReshapeMatmulFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("UnsqueezeBroadcastReshapeMatmulFusion"); UnsqueezeBroadcastReshapeMatmulFusion(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp index 
ede3ac16fb51b5..e740e846409ea9 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class UnsqueezeBroadcastReshapeSDPAFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UnsqueezeBroadcastReshapeSDPAFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("UnsqueezeBroadcastReshapeSDPAFusion"); UnsqueezeBroadcastReshapeSDPAFusion(); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp index e77a02a4439018..24d682c603a799 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp @@ -23,6 +23,7 @@ namespace avoid { // Note: this pattern is only utilized by the online partitioner class RMSNorm : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::avoid::RMSNorm"); RMSNorm(const std::shared_ptr& snapshot, const std::string& avoid_device); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp index 77bc9fb3f90418..8a70290dfabe4e 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp @@ -23,41 +23,49 @@ namespace compute { class DQMatMulGQu4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulGQu4"); DQMatMulGQu4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulCWu4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulCWu4"); DQMatMulCWu4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; 
class DQMatMulGQi4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulGQi4"); DQMatMulGQi4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulCWi4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulCWi4"); DQMatMulCWi4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulConv : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulConv"); DQMatMulConv(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class VocabMatMul : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::VocabMatMul"); VocabMatMul(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class RMSNorm : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::RMSNorm"); RMSNorm(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class RMSNorm2 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::RMSNorm2"); RMSNorm2(const std::shared_ptr& snapshot, const std::string& isol_tag); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp index da06a5304c8bd7..4b4cbdd823dfb2 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp @@ -54,6 +54,9 @@ void finalize_remap(Function& fbody, Subgraph& fsg, const ClosureRemap& m); namespace SymmNoZP { class DCOFFPassBase : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmNoZP::DCOFFPassBase"); + protected: DCOffMode m_dcoff_mode = DCOffMode::CAST_ONLY; ov::element::Type m_dcoff_type; @@ -94,6 +97,9 @@ class DCOFFPassGather final : public DCOFFPassBase { 
namespace SymmZP { // TODO: Not sure if it is actually Symm.. class DCOFFPassBase : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassBase"); + protected: DCOffMode m_dcoff_mode = DCOffMode::CAST_ONLY; ov::element::Type m_dcoff_type; @@ -129,21 +135,26 @@ class DCOFFPassConvert1 final : public DCOFFPassBase { class DCOFFPassReshape2 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassReshape2"); DCOFFPassReshape2(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; class DCOFFPassReshape3 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassReshape3"); DCOFFPassReshape3(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; class DCOFFPassReshape4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassReshape4"); DCOFFPassReshape4(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; class CWAI1 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::CWAI1"); + using CPtr = std::shared_ptr; using Results = std::reference_wrapper>; @@ -152,6 +163,8 @@ class CWAI1 : public ov::pass::MatcherPass { class CWAI2 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::CWAI2"); + using CPtr = std::shared_ptr; using Results = std::reference_wrapper>; @@ -160,6 +173,8 @@ class CWAI2 : public ov::pass::MatcherPass { class CWAI3 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::CWAI3"); + using CPtr = std::shared_ptr; using Results = std::reference_wrapper>; @@ -171,6 +186,7 @@ class CWAI3 : public ov::pass::MatcherPass { namespace AsymmZP { class DCOFFPassReshape : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::AsymmZP::DCOFFPassReshape"); 
DCOFFPassReshape(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp index 904ce88039d2eb..f2cdee0f436d57 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp @@ -64,31 +64,37 @@ struct Context { class DQMatMulCWi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulCWi"); explicit DQMatMulCWi(Context::Ref ctx); }; class DQMatMulGQi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQi"); explicit DQMatMulGQi(Context::Ref ctx); }; class DQMatMulGQ2i : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQ2i"); explicit DQMatMulGQ2i(Context::Ref ctx); }; class DQMatMulGQiP : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQiP"); explicit DQMatMulGQiP(Context::Ref ctx); }; class DQMatMulGQ2iP : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQ2iP"); explicit DQMatMulGQ2iP(Context::Ref ctx); }; class DQParMMGQ : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQParMMGQ"); explicit DQParMMGQ(Context::Ref ctx); }; @@ -98,16 +104,19 @@ void mergeParallelMatMuls(const std::shared_ptr& m, Context& ctx); class DQLiftGatherAsymCW : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQLiftGatherAsymCW"); DQLiftGatherAsymCW(); }; class DQLiftGatherSymCW : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQLiftGatherSymCW"); DQLiftGatherSymCW(); }; class DQLiftGatherSymGQ : public ov::pass::MatcherPass { public: + 
OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQLiftGatherSymGQ"); DQLiftGatherSymGQ(); }; @@ -115,21 +124,25 @@ class DQLiftGatherSymGQ : public ov::pass::MatcherPass { class DQUnpackDictGatheru : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictGatheru"); DQUnpackDictGatheru(Context::Ref ctx); }; class DQUnpackDictGatherGQi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictGatherGQi"); DQUnpackDictGatherGQi(Context::Ref ctx); }; class HostGather : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::HostGather"); HostGather(Context::Ref ctx); }; class HostGatherDQ : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::HostGatherDQ"); HostGatherDQ(Context::Ref ctx); }; @@ -137,43 +150,51 @@ class HostGatherDQ : public ov::pass::MatcherPass { class DQUnpackDictMatMulCWu : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictMatMulCWu"); DQUnpackDictMatMulCWu(Context::Ref ctx); }; class DQUnpackDictMatMulGQi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictMatMulGQi"); DQUnpackDictMatMulGQi(Context::Ref ctx); }; class CompressDictMatMulf32 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::CompressDictMatMulf32"); CompressDictMatMulf32(Context::Ref ctx); }; // Slice last Matmul class SliceLastMatmul : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmul"); SliceLastMatmul(); }; class SliceLastMatmulAdd : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmulAdd"); SliceLastMatmulAdd(); }; class SliceLastMatmulTranspose : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmulTranspose"); 
SliceLastMatmulTranspose(); }; class SliceLastMatmulMultiply : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmulMultiply"); SliceLastMatmulMultiply(); }; // Convolution to MatMul class ConvToMatmul : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::ConvToMatmul"); ConvToMatmul(Context::Ref ctx); }; From e8f906e165a09b5eb707d8857edc96c051887f57 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Thu, 19 Dec 2024 18:48:47 +0100 Subject: [PATCH 30/60] [RTTI] Add OPENVINO_MODEL_PASS_RTTI definition (#28123) ### Details: - Adds RTTI definition for passes derived from ov::pass::ModelPass. - Applies the macro where applicable. ### Tickets: - CVS-159568 --------- Signed-off-by: Tomasz Jankowski Co-authored-by: Michal Lukaszewski --- .../low_precision/align_quantization_intervals.hpp | 2 +- .../align_quantization_parameters.hpp | 2 +- .../include/low_precision/low_precision.hpp | 4 ++-- .../markup_avg_pool_precision_preserved.hpp | 2 +- .../low_precision/markup_can_be_quantized.hpp | 2 +- .../include/low_precision/markup_precisions.hpp | 2 +- .../markup_quantization_granularity.hpp | 3 ++- .../include/low_precision/propagate_precisions.hpp | 2 +- .../low_precision/propagate_shared_value.hpp | 1 + .../tests/simple_low_precision_transformer.hpp | 1 + .../offline_transformations/include/pruning.hpp | 4 ++-- .../include/snippets/pass/align_element_types.hpp | 2 +- .../snippets/pass/analyze_broadcastable_inputs.hpp | 2 +- .../include/snippets/pass/canonicalization.hpp | 2 +- .../include/snippets/pass/fq_decomposition.hpp | 2 ++ src/common/snippets/include/snippets/pass/hash.hpp | 2 +- .../include/snippets/pass/propagate_precision.hpp | 2 +- .../include/snippets/pass/tokenization.hpp | 5 +++-- .../snippets/include/snippets/pass/validate.hpp | 2 +- .../change_placeholder_types.hpp | 2 +- .../common_optimizations/common_optimizations.hpp | 2 +- 
.../common_optimizations/dimension_tracking.hpp | 2 +- .../common_optimizations/fused_names_cleanup.hpp | 2 +- .../mark_precision_sensitive_shapeof_subgraphs.hpp | 2 +- .../moc_legacy_transformations.hpp | 2 +- .../common_optimizations/moc_transformations.hpp | 10 ++++++---- .../optimize_strided_slice.hpp | 8 ++++---- .../push_constant_to_subgraph.hpp | 2 +- .../remove_multi_subgraph_op_dangling_params.hpp | 2 +- .../reverse_shape_and_type_infer.hpp | 2 +- .../common_optimizations/ric_fusion.hpp | 2 +- .../shared_ops_optimization.hpp | 2 +- .../simplify_shape_of_sub_graph.hpp | 2 +- .../transformations/control_flow/unroll_if.hpp | 2 +- .../control_flow/unroll_tensor_iterator.hpp | 2 +- .../include/transformations/convert_precision.hpp | 2 +- .../align_mixed_fp32_fp16_types.hpp | 2 +- .../convert_compression_only_to_legacy.hpp | 2 +- .../mark_subgraphs_to_keep_in_mixed_precision.hpp | 2 +- .../include/transformations/hash.hpp | 2 +- .../include/transformations/init_node_info.hpp | 2 +- .../opset_conversions/convert_opset2_to_opset1.hpp | 2 +- .../opset_conversions/convert_opset3_to_opset2.hpp | 2 +- .../transformations/resolve_names_collisions.hpp | 2 +- .../smart_reshape/lstm_states_broadcast.hpp | 2 +- .../smart_reshape/smart_reshape.hpp | 2 +- .../symbol_optimization.hpp | 4 ++-- .../symbolic_optimizations.hpp | 6 +++--- .../transpose_sinking/ts_general.hpp | 2 +- .../include/transformations/utils/print_model.hpp | 2 +- .../common_optimizations/ric_fusion.cpp | 2 +- .../include/openvino/pass/constant_folding.hpp | 2 +- .../include/openvino/pass/convert_fp32_to_fp16.hpp | 2 +- src/core/include/openvino/pass/graph_rewrite.hpp | 2 +- src/core/include/openvino/pass/low_latency.hpp | 2 +- src/core/include/openvino/pass/make_stateful.hpp | 2 +- src/core/include/openvino/pass/pass.hpp | 14 ++++++++++++-- .../openvino/pass/sdpa_to_paged_attention.hpp | 2 +- src/core/include/openvino/pass/serialize.hpp | 4 ++-- .../openvino/pass/stateful_to_stateless.hpp | 2 +- 
src/core/include/openvino/pass/validate.hpp | 2 +- src/core/include/openvino/pass/visualize_tree.hpp | 2 +- .../frontend/decoder_transformation_extension.cpp | 2 +- src/core/tests/pass_config.cpp | 2 +- .../src/extension/decoder_transformation.cpp | 1 + .../pytorch/src/transforms/dict_resolver.hpp | 4 ++-- .../prim_list_tuple_construct_replacer.hpp | 2 +- .../transforms/prim_unpack_parameter_replacer.hpp | 2 +- .../src/transforms/tuple_unpack_replacer.hpp | 2 +- .../src/transformations/switch_merge_resolve.hpp | 2 +- .../helper_transforms/const_to_result_remover.hpp | 2 +- .../saved_model_unused_remover.hpp | 2 +- .../tflite_quantize_resolver.hpp | 2 +- .../aarch64/pass/snippets_mark_skipped.hpp | 2 +- .../snippets/x64/pass/enforce_precision.hpp | 2 +- .../snippets/x64/pass/snippets_mark_skipped.hpp | 2 +- .../tpp/x64/pass/fuse_tpp_to_equations.hpp | 2 +- .../plugin/transformations/convert_convolution.hpp | 2 +- .../transformations/print_model_statistics.hpp | 2 +- .../include/common_test_utils/graph_comparator.hpp | 3 +++ .../common_test_utils/src/ov_test_utils.cpp | 1 + 81 files changed, 113 insertions(+), 90 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp b/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp index 2caf346bfc5da6..5d82af51847081 100644 --- a/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp +++ b/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp @@ -30,7 +30,7 @@ class LP_TRANSFORMATIONS_API AlignQuantizationIntervals; */ class ov::pass::low_precision::AlignQuantizationIntervals : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignQuantizationIntervals", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::AlignQuantizationIntervals"); AlignQuantizationIntervals(const std::vector& defaultPrecisions = 
ov::pass::low_precision::precision_set::get_int8_support()); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp b/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp index 89c9bf59747860..ce6db1c397522f 100644 --- a/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp +++ b/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp @@ -31,7 +31,7 @@ class LP_TRANSFORMATIONS_API AlignQuantizationParameters; */ class ov::pass::low_precision::AlignQuantizationParameters : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignQuantizationParameters", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::AlignQuantizationParameters"); AlignQuantizationParameters(const std::vector defaultPrecisions = ov::pass::low_precision::precision_set::get_int8_support()); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp index d6bddd3643a4f6..b3b92340303ced 100644 --- a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp +++ b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp @@ -42,7 +42,7 @@ class LP_TRANSFORMATIONS_API LowPrecision; class ov::pass::low_precision::MarkupOptimizations : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkupOptimizations", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupOptimizations"); MarkupOptimizations( const std::vector& precisionRestrictions, const std::vector& quantizationRestrictions, @@ -62,7 +62,7 @@ class ov::pass::low_precision::TypeRelaxedReplacer : public ov::pass::GraphRewri class LP_TRANSFORMATIONS_API 
ov::pass::low_precision::LowPrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("LowPrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::LowPrecision"); LowPrecision( const std::vector& precisionRestrictions = {}, const std::vector& quantizationRestrictions = {}, diff --git a/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp b/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp index da3605b4ee4ba5..4e65b9f414f9fc 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp @@ -29,7 +29,7 @@ class LP_TRANSFORMATIONS_API MarkupAvgPoolPrecisionPreserved; */ class ov::pass::low_precision::MarkupAvgPoolPrecisionPreserved : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkupAvgPoolPrecisionPreserved", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupAvgPoolPrecisionPreserved"); MarkupAvgPoolPrecisionPreserved(const std::vector defaultPrecisions = ov::pass::low_precision::precision_set::get_int8_support()); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp b/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp index f886e69f2088c7..7359a6617c96b6 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp @@ -30,7 +30,7 @@ class LP_TRANSFORMATIONS_API MarkupCanBeQuantized; */ class ov::pass::low_precision::MarkupCanBeQuantized : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkupCanBeQuantized", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupCanBeQuantized"); 
MarkupCanBeQuantized(const std::vector defaultPrecisions = { ov::element::u8, ov::element::i8 }); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp b/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp index 92ea93b2c3e5b1..56e926101581bf 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp @@ -36,6 +36,7 @@ class LP_TRANSFORMATIONS_API MarkupPrecisions; */ class ov::pass::low_precision::MarkupPrecisions : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupPrecisions"); class Restriction { public: class RestrictionByVersion { @@ -65,7 +66,6 @@ class ov::pass::low_precision::MarkupPrecisions : public ov::pass::ModelPass { std::unordered_map precisionsByVersion; }; - OPENVINO_RTTI("MarkupPrecisions", "0"); explicit MarkupPrecisions(const std::vector& restrictions = {}, const std::vector& defaultPrecisions = { ov::element::u8, ov::element::i8 }); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp b/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp index a61473cf263963..aa0ee4b4191de0 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp @@ -34,6 +34,8 @@ class LP_TRANSFORMATIONS_API MarkupQuantizationGranularity; */ class ov::pass::low_precision::MarkupQuantizationGranularity : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupQuantizationGranularity"); + class PerTensorQuantization { public: explicit 
PerTensorQuantization(const bool versionIsRequired) : versionIsRequired(versionIsRequired) {} @@ -45,7 +47,6 @@ class ov::pass::low_precision::MarkupQuantizationGranularity : public ov::pass:: std::unordered_map> portsByVersion; }; - OPENVINO_RTTI("MarkupPerTensorQuantization", "0"); explicit MarkupQuantizationGranularity(const std::vector& restrictions = {}); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp index c17fa2faded476..98f91bc5155d7b 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp @@ -31,7 +31,7 @@ class LP_TRANSFORMATIONS_API PropagatePrecisions; */ class ov::pass::low_precision::PropagatePrecisions : public ov::pass::ModelPass { public: - OPENVINO_RTTI("PropagatePrecisions", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::PropagatePrecisions"); PropagatePrecisions(const AttributeParameters& params = AttributeParameters()); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp index 16a014cda6ec04..9254e2fc007dab 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp @@ -38,6 +38,7 @@ class LP_TRANSFORMATIONS_API PropagateSharedValue; template class ov::pass::low_precision::PropagateSharedValue : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("low_precision::PropagateSharedValue"); bool run_on_model(const std::shared_ptr& f) override { OV_ITT_SCOPE(FIRST_INFERENCE, 
itt::domains::LPT_LT, "PropagateSharedValue"); diff --git a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp index 7593361f8dd71a..36cf9747f6d76b 100644 --- a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp +++ b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp @@ -16,6 +16,7 @@ class SimpleLowPrecisionTransformer : public ov::pass::ModelPass{ public: + OPENVINO_MODEL_PASS_RTTI("SimpleLowPrecisionTransformer"); SimpleLowPrecisionTransformer( const std::vector& precisionRestrictions = {}, const std::vector& quantizationRestrictions = {}, diff --git a/src/common/offline_transformations/include/pruning.hpp b/src/common/offline_transformations/include/pruning.hpp index dd5374d7477ded..e573108a89eb86 100644 --- a/src/common/offline_transformations/include/pruning.hpp +++ b/src/common/offline_transformations/include/pruning.hpp @@ -67,7 +67,7 @@ class ov::pass::PropagateMasks : public ov::pass::GraphRewrite { */ class ov::pass::ShrinkWeights : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ShrinkWeights", "0"); + OPENVINO_MODEL_PASS_RTTI("ShrinkWeights"); bool run_on_model(const std::shared_ptr&) override; }; @@ -77,6 +77,6 @@ class ov::pass::ShrinkWeights : public ov::pass::ModelPass { */ class ov::pass::Pruning : public ov::pass::ModelPass { public: - OPENVINO_RTTI("Pruning", "0"); + OPENVINO_MODEL_PASS_RTTI("Pruning"); bool run_on_model(const std::shared_ptr&) override; }; diff --git a/src/common/snippets/include/snippets/pass/align_element_types.hpp b/src/common/snippets/include/snippets/pass/align_element_types.hpp index 1d5a1fa9256c88..6261b2b87b2a65 100644 --- a/src/common/snippets/include/snippets/pass/align_element_types.hpp +++ b/src/common/snippets/include/snippets/pass/align_element_types.hpp @@ -19,7 +19,7 @@ namespace pass { */ class AlignElementTypes: public 
ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignElementTypes"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::AlignElementTypes"); AlignElementTypes(std::vector input_precisions, std::vector output_precisions); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp b/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp index 4367567c9df1c7..9d23e462ddff94 100644 --- a/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp +++ b/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp @@ -21,7 +21,7 @@ namespace pass { */ class AnalyzeBroadcastableInputs : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AnalyzeBroadcastableInputs"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::AnalyzeBroadcastableInputs"); // [Index of Parameter -> Index of broadcastable dimension from end] using BroadcastableInputsMap = std::map; AnalyzeBroadcastableInputs(BroadcastableInputsMap& map); diff --git a/src/common/snippets/include/snippets/pass/canonicalization.hpp b/src/common/snippets/include/snippets/pass/canonicalization.hpp index 5c7acaa781d2b8..645184a55609ba 100644 --- a/src/common/snippets/include/snippets/pass/canonicalization.hpp +++ b/src/common/snippets/include/snippets/pass/canonicalization.hpp @@ -22,7 +22,7 @@ namespace pass { */ class Canonicalization: public ov::pass::ModelPass { public: - OPENVINO_RTTI("Canonicalization"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::Canonicalization"); using BlockedShapeVector = op::Subgraph::BlockedShapeVector; using Layout = std::vector; explicit Canonicalization(const BlockedShapeVector& blocked_input_shapes); diff --git a/src/common/snippets/include/snippets/pass/fq_decomposition.hpp b/src/common/snippets/include/snippets/pass/fq_decomposition.hpp index 1e4af6c04e22fa..9f2b0ab7cc4bf9 100644 --- a/src/common/snippets/include/snippets/pass/fq_decomposition.hpp +++ 
b/src/common/snippets/include/snippets/pass/fq_decomposition.hpp @@ -80,6 +80,8 @@ class FakeQuantizeDecomposition : public ov::pass::MatcherPass { */ class CommonFakeQuantizeDecomposition: public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("snippets::pass::CommonFakeQuantizeDecomposition"); + bool run_on_model(const std::shared_ptr& m) override; static bool is_supported_fq(const std::shared_ptr& fq); diff --git a/src/common/snippets/include/snippets/pass/hash.hpp b/src/common/snippets/include/snippets/pass/hash.hpp index 66126cd21bbe6d..fc3bd9a64d0bdc 100644 --- a/src/common/snippets/include/snippets/pass/hash.hpp +++ b/src/common/snippets/include/snippets/pass/hash.hpp @@ -18,7 +18,7 @@ namespace pass { */ class Hash : public ov::pass::ModelPass { public: - OPENVINO_RTTI("HashPass", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::Hash"); bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/snippets/include/snippets/pass/propagate_precision.hpp b/src/common/snippets/include/snippets/pass/propagate_precision.hpp index 6f805cb1b68808..66d0b28430dd5f 100644 --- a/src/common/snippets/include/snippets/pass/propagate_precision.hpp +++ b/src/common/snippets/include/snippets/pass/propagate_precision.hpp @@ -19,7 +19,7 @@ namespace pass { */ class PropagatePrecision: public ov::pass::ModelPass { public: - OPENVINO_RTTI("PropagatePrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::PropagatePrecision"); PropagatePrecision(const std::shared_ptr& target_machine); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/snippets/include/snippets/pass/tokenization.hpp b/src/common/snippets/include/snippets/pass/tokenization.hpp index 24efcceec71a24..fc7ed8aace6d64 100644 --- a/src/common/snippets/include/snippets/pass/tokenization.hpp +++ b/src/common/snippets/include/snippets/pass/tokenization.hpp @@ -37,7 +37,7 @@ int64_t GetTopologicalOrder(const std::shared_ptr&); */ class EnumerateNodes : public 
ov::pass::ModelPass { public: - OPENVINO_RTTI("EnumerateNodes", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::EnumerateNodes"); EnumerateNodes() : ModelPass() {} bool run_on_model(const std::shared_ptr&) override; }; @@ -59,6 +59,8 @@ class EnumerateNodes : public ov::pass::ModelPass { */ class SnippetsTokenization : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("snippets::pass::SnippetsTokenization"); + /** * @interface Config * @brief Allow to adjust tokenization passes @@ -123,7 +125,6 @@ class SnippetsTokenization : public ov::pass::ModelPass { std::set m_mha_supported_transpose_ranks = { 3, 4 }; }; - OPENVINO_RTTI("SnippetsTokenization", "0"); SnippetsTokenization(const Config& config) : m_config(config) {} bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/snippets/include/snippets/pass/validate.hpp b/src/common/snippets/include/snippets/pass/validate.hpp index 1fedf8a2ce08d5..21d748eaa3afbb 100644 --- a/src/common/snippets/include/snippets/pass/validate.hpp +++ b/src/common/snippets/include/snippets/pass/validate.hpp @@ -17,7 +17,7 @@ namespace pass { */ class Validate: public ov::pass::ModelPass { public: - OPENVINO_RTTI("Validate", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::Validate"); Validate(const std::shared_ptr& pass_config) : m_pass_config(pass_config) {} bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp b/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp index d2ff059d5974ef..023937b79df48a 100644 --- a/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp @@ -20,7 +20,7 @@ class TRANSFORMATIONS_API ChangePlaceholderTypes; */ class ChangePlaceholderTypes : public ModelPass { 
public: - OPENVINO_RTTI("ChangePlaceholderTypes", "0"); + OPENVINO_MODEL_PASS_RTTI("ChangePlaceholderTypes"); explicit ChangePlaceholderTypes(const std::vector& params_with_custom_types) : m_params_with_custom_types(params_with_custom_types) {} bool run_on_model(const std::shared_ptr& model) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp b/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp index 7540275ce74d0f..35017f5984f9a1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API CommonOptimizations; class ov::pass::CommonOptimizations : public ov::pass::ModelPass { public: - OPENVINO_RTTI("CommonOptimizations", "0"); + OPENVINO_MODEL_PASS_RTTI("CommonOptimizations"); bool run_on_model(const std::shared_ptr& f) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp b/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp index d1ec2069b3f621..ca176057972eaa 100644 --- a/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp @@ -22,7 +22,7 @@ class TRANSFORMATIONS_API FindBatch; class ov::pass::FindBatch : public ov::pass::ModelPass { public: - OPENVINO_RTTI("FindBatch"); + OPENVINO_MODEL_PASS_RTTI("FindBatch"); FindBatch(bool detach_detection_output = false, bool track = true) : track(track), detach_do(detach_detection_output) {} diff --git a/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp 
b/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp index 8058a01811d9bc..fd7ce8defba920 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API FusedNamesCleanup; */ class ov::pass::FusedNamesCleanup : public ov::pass::ModelPass { public: - OPENVINO_RTTI("FusedNamesCleanup", "0"); + OPENVINO_MODEL_PASS_RTTI("FusedNamesCleanup"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp b/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp index 05e73456d46ee0..3e20e7535f8fed 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API MarkShapeOfSubgraphs; */ class ov::pass::MarkPrecisionSensitiveShapeOfSubgraphs : public ModelPass { public: - OPENVINO_RTTI("MarkPrecisionSensitiveShapeOfSubgraphs", "0"); + OPENVINO_MODEL_PASS_RTTI("MarkPrecisionSensitiveShapeOfSubgraphs"); MarkPrecisionSensitiveShapeOfSubgraphs(); bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp b/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp index 833ce83e6cf065..0f40a4d81a1bde 100644 --- a/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp +++ 
b/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp @@ -32,7 +32,7 @@ namespace pass { class MOCLegacyTransformations : public ModelPass { public: - OPENVINO_RTTI("MOCLegacyTransformations", "0"); + OPENVINO_MODEL_PASS_RTTI("MOCLegacyTransformations"); explicit MOCLegacyTransformations(const std::vector& params_with_custom_types) : m_params_with_custom_types(params_with_custom_types) {} bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp b/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp index 49893dfb220de6..b65c9b84456ff8 100644 --- a/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp @@ -24,11 +24,9 @@ class TRANSFORMATIONS_API MOCTransformations; */ class ov::pass::MOCTransformations : public ov::pass::ModelPass { - bool m_use_shapes; - bool m_low_precision_enabled; - public: - OPENVINO_RTTI("MOCTransformations", "0"); + OPENVINO_MODEL_PASS_RTTI("MOCTransformations"); + /** * use_shapes = True enables transformations which are depends on shapes and also it * enables ConstantFolding for all ShapeOf operations. 
@@ -41,4 +39,8 @@ class ov::pass::MOCTransformations : public ov::pass::ModelPass { m_low_precision_enabled(low_precision_enabled) {} bool run_on_model(const std::shared_ptr& m) override; + +private: + bool m_use_shapes; + bool m_low_precision_enabled; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp b/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp index 454378a0e9bbd1..cb642795254791 100644 --- a/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp @@ -29,7 +29,7 @@ class TRANSFORMATIONS_API SliceSequenceToSingleSlice; */ class ov::pass::UselessSliceEraser : public ov::pass::ModelPass { public: - OPENVINO_RTTI("UselessSliceEraser", "0"); + OPENVINO_MODEL_PASS_RTTI("UselessSliceEraser"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -41,7 +41,7 @@ class ov::pass::UselessSliceEraser : public ov::pass::ModelPass { */ class ov::pass::GroupedStridedSliceOptimizer : public ov::pass::ModelPass { public: - OPENVINO_RTTI("GroupedStridedSliceOptimizer", "0"); + OPENVINO_MODEL_PASS_RTTI("GroupedStridedSliceOptimizer"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -53,7 +53,7 @@ class ov::pass::GroupedStridedSliceOptimizer : public ov::pass::ModelPass { */ class ov::pass::GroupedSliceToVSplitOptimization : public ov::pass::ModelPass { public: - OPENVINO_RTTI("GroupedSliceToVSplitOptimization", "0"); + OPENVINO_MODEL_PASS_RTTI("GroupedSliceToVSplitOptimization"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -82,9 +82,9 @@ class ov::pass::SliceSequenceToSingleSlice : public ov::pass::MatcherPass { */ class ov::pass::StridedSliceOptimization : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("StridedSliceOptimization"); StridedSliceOptimization(bool 
use_shapes = true); - OPENVINO_RTTI("StridedSliceOptimization", "0"); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp b/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp index e571b9a41869f2..085da725a64233 100644 --- a/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp @@ -17,7 +17,7 @@ namespace pass { */ class TRANSFORMATIONS_API PushConstantToSubgraph : public ov::pass::ModelPass { public: - OPENVINO_RTTI("PushConstantToSubgraph", "0"); + OPENVINO_MODEL_PASS_RTTI("PushConstantToSubgraph"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp b/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp index 359d68f7c980c2..b5b35e44364e46 100644 --- a/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API RemoveMultiSubGraphOpDanglingParamsResults; class ov::pass::RemoveMultiSubGraphOpDanglingParamsResults : public ov::pass::ModelPass { public: - OPENVINO_RTTI("RemoveMultiSubGraphOpDanglingParamsResults", "0"); + OPENVINO_MODEL_PASS_RTTI("RemoveMultiSubGraphOpDanglingParamsResults"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp 
b/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp index cfc5d789d9c07e..f0f9fe269e6206 100644 --- a/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp @@ -20,7 +20,7 @@ class TRANSFORMATIONS_API ReverseShapeAndTypeInfer; */ class ov::pass::ReverseShapeAndTypeInfer : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ReverseShapeAndTypeInfer", "0"); + OPENVINO_MODEL_PASS_RTTI("ReverseShapeAndTypeInfer"); bool run_on_model(const std::shared_ptr& f) override; private: diff --git a/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp index c91c27a464cada..1c56d61bb615c9 100644 --- a/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API ReverseInputChannelsFusion; class ov::pass::ReverseInputChannelsFusion : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ReverseInputChannelsFusion", "0"); + OPENVINO_MODEL_PASS_RTTI("ReverseInputChannelsFusion"); bool run_on_model(const std::shared_ptr&) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp b/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp index 8e2a87502ebcc9..6ff8f611564db2 100644 --- a/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API SharedOpOptimization; */ class 
ov::pass::SharedOpOptimization : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SharedOpOptimization", "0"); + OPENVINO_MODEL_PASS_RTTI("SharedOpOptimization"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp index 509047578bd489..79e0ffd789bf7c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp @@ -41,7 +41,7 @@ class ov::pass::GroupedGatherElimination : public ov::pass::MatcherPass { */ class ov::pass::SimplifyShapeOfSubGraph : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SimplifyShapeOfSubGraph", "0"); + OPENVINO_MODEL_PASS_RTTI("SimplifyShapeOfSubGraph"); explicit SimplifyShapeOfSubGraph(bool use_shapes = true) : m_use_shapes(use_shapes){}; bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/transformations/include/transformations/control_flow/unroll_if.hpp b/src/common/transformations/include/transformations/control_flow/unroll_if.hpp index e59b60106e2536..5dc95ee3eeb85a 100644 --- a/src/common/transformations/include/transformations/control_flow/unroll_if.hpp +++ b/src/common/transformations/include/transformations/control_flow/unroll_if.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API UnrollIf; class ov::pass::UnrollIf : public ov::pass::ModelPass { public: - OPENVINO_RTTI("UnrollIf", "0"); + OPENVINO_MODEL_PASS_RTTI("UnrollIf"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp b/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp index 282aecab5c7f05..ce8b6419e6734e 
100644 --- a/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp +++ b/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp @@ -28,6 +28,6 @@ class TRANSFORMATIONS_API UnrollTensorIterator; class ov::pass::UnrollTensorIterator : public ov::pass::ModelPass { public: - OPENVINO_RTTI("UnrollTensorIterator", "0"); + OPENVINO_MODEL_PASS_RTTI("UnrollTensorIterator"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/convert_precision.hpp b/src/common/transformations/include/transformations/convert_precision.hpp index 30f773da9e3be4..b411629ccbce77 100644 --- a/src/common/transformations/include/transformations/convert_precision.hpp +++ b/src/common/transformations/include/transformations/convert_precision.hpp @@ -79,7 +79,7 @@ using type_to_fuse_map = class ov::pass::ConvertPrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertPrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertPrecision"); ConvertPrecision(ov::element::Type_t from, ov::element::Type_t to, type_to_fuse_map additional_type_to_fuse_map = {}, diff --git a/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp b/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp index 4fd93d7742ab67..e890b89794d862 100644 --- a/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API AlignMixedFP32FP16Types; */ class ov::pass::AlignMixedFP32FP16Types : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignMixedFP32FP16Types", "0"); + OPENVINO_MODEL_PASS_RTTI("AlignMixedFP32FP16Types"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git 
a/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp b/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp index fbac44f637a2a2..c7c8e408da2209 100644 --- a/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertCompressedOnlyToLegacy; */ class ov::pass::ConvertCompressedOnlyToLegacy : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertCompressedOnlyToLegacy", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertCompressedOnlyToLegacy"); bool run_on_model(const std::shared_ptr& f) override; }; diff --git a/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp b/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp index 0ffce989f0d83c..d7f4e2991a3d07 100644 --- a/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp @@ -25,6 +25,6 @@ constexpr auto float16_min_normalized = float16::from_bits(0x0400); */ class ov::pass::MarkSugraphsToKeepInMixedPrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkSugraphsToKeepInMixedPrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("MarkSugraphsToKeepInMixedPrecision"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/hash.hpp b/src/common/transformations/include/transformations/hash.hpp index 73668c7e53548e..3d6cf8f46076dc 100644 --- a/src/common/transformations/include/transformations/hash.hpp +++ 
b/src/common/transformations/include/transformations/hash.hpp @@ -18,7 +18,7 @@ namespace pass { */ class TRANSFORMATIONS_API Hash : public ov::pass::ModelPass { public: - OPENVINO_RTTI("HashPass"); + OPENVINO_MODEL_PASS_RTTI("HashPass"); bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/transformations/include/transformations/init_node_info.hpp b/src/common/transformations/include/transformations/init_node_info.hpp index e458bcfdcd4bd4..9474edc00cf9e0 100644 --- a/src/common/transformations/include/transformations/init_node_info.hpp +++ b/src/common/transformations/include/transformations/init_node_info.hpp @@ -35,6 +35,6 @@ class TRANSFORMATIONS_API InitNodeInfo; */ class ov::pass::InitNodeInfo : public ov::pass::ModelPass { public: - OPENVINO_RTTI("InitNodeInfo", "0"); + OPENVINO_MODEL_PASS_RTTI("InitNodeInfo"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp b/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp index 8b523bc663912d..e3d511d396c510 100644 --- a/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp +++ b/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API ConvertOpSet2ToOpSet1; class ov::pass::ConvertOpSet2ToOpSet1 : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertOpSet2ToOpSet1", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertOpSet2ToOpSet1"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp b/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp index 8a44b0d24ad6e4..f5e4faeeca93f8 100644 --- 
a/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp +++ b/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API ConvertOpSet3ToOpSet2; class ov::pass::ConvertOpSet3ToOpSet2 : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertOpSet3ToOpSet2", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertOpSet3ToOpSet2"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/resolve_names_collisions.hpp b/src/common/transformations/include/transformations/resolve_names_collisions.hpp index f1d1f18a505441..8af93bd055043f 100644 --- a/src/common/transformations/include/transformations/resolve_names_collisions.hpp +++ b/src/common/transformations/include/transformations/resolve_names_collisions.hpp @@ -24,7 +24,7 @@ namespace pass { */ class TRANSFORMATIONS_API ResolveNameCollisions : public ModelPass { public: - OPENVINO_RTTI("ResolveNameCollisions", "0"); + OPENVINO_MODEL_PASS_RTTI("ResolveNameCollisions"); ResolveNameCollisions() = default; explicit ResolveNameCollisions(bool resolve_all_names) : m_resolve_all_names(resolve_all_names) {} bool run_on_model(const std::shared_ptr& model) override; diff --git a/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp b/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp index 55f80ace7812e3..6b93dd465c3ecd 100644 --- a/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API LSTMStatesBroadcast; class ov::pass::LSTMStatesBroadcast : public ov::pass::ModelPass { public: - OPENVINO_RTTI("LSTMStatesBroadcast", "0"); + OPENVINO_MODEL_PASS_RTTI("LSTMStatesBroadcast"); bool run_on_model(const 
std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp b/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp index 55bdf523762d91..970d64447c798a 100644 --- a/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API SmartReshape; class ov::pass::SmartReshape : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SmartReshape", "0"); + OPENVINO_MODEL_PASS_RTTI("SmartReshape"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp b/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp index 179bc7d6cfcf52..323a1218bfdf4a 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp @@ -21,7 +21,7 @@ class TRANSFORMATIONS_API OptimizeSymbolsUsedAsValues; */ class ov::pass::ApplySymbolEquivalence : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ApplySymbolEquivalence", "0"); + OPENVINO_MODEL_PASS_RTTI("ApplySymbolEquivalence"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -32,6 +32,6 @@ class ov::pass::ApplySymbolEquivalence : public ov::pass::ModelPass { */ class ov::pass::OptimizeSymbolsUsedAsValues : public ov::pass::ModelPass { public: - OPENVINO_RTTI("OptimizeSymbolsUsedAsValues", "0"); + OPENVINO_MODEL_PASS_RTTI("OptimizeSymbolsUsedAsValues"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp 
b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp index 17a5d8503cbcb9..c6a99c90122544 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp @@ -24,7 +24,7 @@ class TRANSFORMATIONS_API LabelResolvingThroughSelect; */ class ov::pass::SymbolicOptimizations : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SymbolicOptimizations", "0"); + OPENVINO_MODEL_PASS_RTTI("SymbolicOptimizations"); explicit SymbolicOptimizations(bool full_run = true); bool run_on_model(const std::shared_ptr& m) override; std::shared_ptr get_manager() { @@ -42,7 +42,7 @@ class ov::pass::SymbolicOptimizations : public ov::pass::ModelPass { */ class ov::pass::SymbolicPropagation : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SymbolicPropagation"); + OPENVINO_MODEL_PASS_RTTI("SymbolicPropagation"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -60,4 +60,4 @@ class ov::pass::LabelResolvingThroughSelect : public ov::pass::MatcherPass { public: OPENVINO_RTTI("LabelResolvingThroughSelect", "0"); LabelResolvingThroughSelect(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp index c794608798a4a4..09d4d5819322a9 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp @@ -52,6 +52,6 @@ class ov::pass::transpose_sinking::TSGeneralBackward : public ov::pass::GraphRew */ class ov::pass::transpose_sinking::TSGeneral : public ov::pass::ModelPass { public: - OPENVINO_RTTI("TSGeneral", "0"); + OPENVINO_MODEL_PASS_RTTI("TSGeneral"); bool run_on_model(const 
std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/utils/print_model.hpp b/src/common/transformations/include/transformations/utils/print_model.hpp index 29340d60727c82..0829cd7e320e88 100644 --- a/src/common/transformations/include/transformations/utils/print_model.hpp +++ b/src/common/transformations/include/transformations/utils/print_model.hpp @@ -390,7 +390,7 @@ void dump_cpp_style(std::ostream& os, const std::shared_ptr& model) { class OPENVINO_API PrintModel : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::pass::PrintModel"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::PrintModel"); PrintModel(std::string file_name) { static int dump_index = 0; diff --git a/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp b/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp index 53f26e00136ee9..4e9715883ec9f8 100644 --- a/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp @@ -804,7 +804,7 @@ class ConvertPassThrough : public ov::pass::MatcherPass { class Constant : public ov::pass::ModelPass { public: - OPENVINO_RTTI("Constant", "0"); + OPENVINO_MODEL_PASS_RTTI("Constant"); Constant() = default; bool run_on_model(const std::shared_ptr& model) override { RUN_ON_FUNCTION_SCOPE(Constant); diff --git a/src/core/include/openvino/pass/constant_folding.hpp b/src/core/include/openvino/pass/constant_folding.hpp index 134f02c590974f..6b2e763e49cb1a 100644 --- a/src/core/include/openvino/pass/constant_folding.hpp +++ b/src/core/include/openvino/pass/constant_folding.hpp @@ -18,7 +18,7 @@ namespace pass { */ class OPENVINO_API ConstantFolding : public ModelPass { public: - OPENVINO_RTTI("ConstantFolding"); + OPENVINO_MODEL_PASS_RTTI("ConstantFolding"); bool run_on_model(const std::shared_ptr& model) override; protected: diff --git 
a/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp b/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp index ae9bb25efb4a87..ebae49b7a1fa05 100644 --- a/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp +++ b/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp @@ -14,7 +14,7 @@ namespace pass { */ class OPENVINO_API ConvertFP32ToFP16 : public ModelPass { public: - OPENVINO_RTTI("ConvertFP32ToFP16"); + OPENVINO_MODEL_PASS_RTTI("ConvertFP32ToFP16"); bool run_on_model(const std::shared_ptr&) override; }; } // namespace pass diff --git a/src/core/include/openvino/pass/graph_rewrite.hpp b/src/core/include/openvino/pass/graph_rewrite.hpp index 3fd801235c31a2..ec8e1339912513 100644 --- a/src/core/include/openvino/pass/graph_rewrite.hpp +++ b/src/core/include/openvino/pass/graph_rewrite.hpp @@ -34,7 +34,7 @@ namespace pass { /// \ingroup ov_pass_cpp_api class OPENVINO_API GraphRewrite : public ModelPass { public: - OPENVINO_RTTI("ov::pass::GraphRewrite"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::GraphRewrite"); GraphRewrite() = default; diff --git a/src/core/include/openvino/pass/low_latency.hpp b/src/core/include/openvino/pass/low_latency.hpp index b3a8d38f755d42..d7bead4972c39b 100644 --- a/src/core/include/openvino/pass/low_latency.hpp +++ b/src/core/include/openvino/pass/low_latency.hpp @@ -36,7 +36,7 @@ namespace pass { */ class OPENVINO_API LowLatency2 : public ModelPass { public: - OPENVINO_RTTI("LowLatency2"); + OPENVINO_MODEL_PASS_RTTI("LowLatency2"); explicit LowLatency2(bool use_const_initializer = true) : m_use_const_initializer(use_const_initializer) {} diff --git a/src/core/include/openvino/pass/make_stateful.hpp b/src/core/include/openvino/pass/make_stateful.hpp index a4132589fe6a41..064f3fdea6aad1 100644 --- a/src/core/include/openvino/pass/make_stateful.hpp +++ b/src/core/include/openvino/pass/make_stateful.hpp @@ -18,7 +18,7 @@ namespace pass { */ class OPENVINO_API MakeStateful : public ModelPass { public: - 
OPENVINO_RTTI("MakeStateful"); + OPENVINO_MODEL_PASS_RTTI("MakeStateful"); using ParamResPairs = std::vector, std::shared_ptr>>; diff --git a/src/core/include/openvino/pass/pass.hpp b/src/core/include/openvino/pass/pass.hpp index 5c27df8aed4a0d..3927e7542aa886 100644 --- a/src/core/include/openvino/pass/pass.hpp +++ b/src/core/include/openvino/pass/pass.hpp @@ -4,16 +4,26 @@ #pragma once -#include #include -#include +#include #include "openvino/core/core_visibility.hpp" #include "openvino/core/enum_mask.hpp" #include "openvino/core/model.hpp" #include "openvino/core/node.hpp" +#include "openvino/core/rtti.hpp" #include "openvino/pass/pass_config.hpp" +#define _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE(TYPE_NAME) _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE_VERSION(TYPE_NAME, "0") + +#define _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \ + _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT(TYPE_NAME, VERSION_NAME, ::ov::pass::ModelPass) + +#define OPENVINO_MODEL_PASS_RTTI(...) \ + _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR_2(__VA_ARGS__, \ + _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE_VERSION, \ + _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE)(__VA_ARGS__)) + namespace ov { namespace pass { enum class PassProperty : uint32_t { diff --git a/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp b/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp index a0dd403818b462..74aeacb0719cee 100644 --- a/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp +++ b/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp @@ -17,7 +17,7 @@ namespace pass { */ class OPENVINO_API SDPAToPagedAttention : public ModelPass { public: - OPENVINO_RTTI("SDPAToPagedAttention"); + OPENVINO_MODEL_PASS_RTTI("SDPAToPagedAttention"); SDPAToPagedAttention(bool use_block_indices_inputs = false, bool use_score_outputs = false); bool run_on_model(const std::shared_ptr& model) override; diff --git a/src/core/include/openvino/pass/serialize.hpp 
b/src/core/include/openvino/pass/serialize.hpp index d0eaadde346bf6..ff99a59c70b556 100644 --- a/src/core/include/openvino/pass/serialize.hpp +++ b/src/core/include/openvino/pass/serialize.hpp @@ -26,7 +26,7 @@ namespace pass { */ class OPENVINO_API Serialize : public ov::pass::ModelPass { public: - OPENVINO_RTTI("Serialize"); + OPENVINO_MODEL_PASS_RTTI("Serialize"); enum class Version : uint8_t { UNSPECIFIED = 0, // Use the latest or function version @@ -63,7 +63,7 @@ class OPENVINO_API Serialize : public ov::pass::ModelPass { */ class OPENVINO_API StreamSerialize : public ov::pass::ModelPass { public: - OPENVINO_RTTI("StreamSerialize"); + OPENVINO_MODEL_PASS_RTTI("StreamSerialize"); struct DataHeader { size_t custom_data_offset; diff --git a/src/core/include/openvino/pass/stateful_to_stateless.hpp b/src/core/include/openvino/pass/stateful_to_stateless.hpp index 90fd6b9e6e7901..551c9315c20f72 100644 --- a/src/core/include/openvino/pass/stateful_to_stateless.hpp +++ b/src/core/include/openvino/pass/stateful_to_stateless.hpp @@ -14,7 +14,7 @@ namespace pass { */ class OPENVINO_API StatefulToStateless : public ModelPass { public: - OPENVINO_RTTI("StatefulToStateless"); + OPENVINO_MODEL_PASS_RTTI("StatefulToStateless"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/core/include/openvino/pass/validate.hpp b/src/core/include/openvino/pass/validate.hpp index 23cfe9909f707b..dce6967131d33d 100644 --- a/src/core/include/openvino/pass/validate.hpp +++ b/src/core/include/openvino/pass/validate.hpp @@ -24,7 +24,7 @@ namespace pass { /// \ingroup ov_pass_cpp_api class OPENVINO_API Validate : public ModelPass { public: - OPENVINO_RTTI("ov::pass::Validate"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::Validate"); Validate() : ModelPass() {} bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/core/include/openvino/pass/visualize_tree.hpp b/src/core/include/openvino/pass/visualize_tree.hpp index 065cec96e27220..2207270cd57d3d 100644 --- 
a/src/core/include/openvino/pass/visualize_tree.hpp +++ b/src/core/include/openvino/pass/visualize_tree.hpp @@ -28,7 +28,7 @@ namespace pass { */ class OPENVINO_API VisualizeTree : public ModelPass { public: - OPENVINO_RTTI("ov::pass::VisualizeTree"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::VisualizeTree"); using node_modifiers_t = std::function& attributes)>; VisualizeTree(const std::string& file_name, node_modifiers_t nm = nullptr, bool dot_only = false); diff --git a/src/core/tests/frontend/decoder_transformation_extension.cpp b/src/core/tests/frontend/decoder_transformation_extension.cpp index 714dd4b9fafb18..d1110041392bf7 100644 --- a/src/core/tests/frontend/decoder_transformation_extension.cpp +++ b/src/core/tests/frontend/decoder_transformation_extension.cpp @@ -37,7 +37,7 @@ TEST(DecoderTransformation, FunctionPass) { namespace _decoder_transformation_test { class TestPass : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::pass::TestPass"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::TestPass"); TestPass() = default; TestPass(const TestPass& tp) = default; bool run_on_model(const std::shared_ptr&) override { diff --git a/src/core/tests/pass_config.cpp b/src/core/tests/pass_config.cpp index 15ebc71eef10a6..053cb2b62aff32 100644 --- a/src/core/tests/pass_config.cpp +++ b/src/core/tests/pass_config.cpp @@ -51,7 +51,7 @@ class RenameSigmoid : public ov::pass::MatcherPass { class TestModelPass : public pass::ModelPass { public: - OPENVINO_RTTI("TestModelPass"); + OPENVINO_MODEL_PASS_RTTI("TestModelPass"); bool run_on_model(const std::shared_ptr& f) override { pass::Manager manager(get_pass_config()); diff --git a/src/frontends/common/src/extension/decoder_transformation.cpp b/src/frontends/common/src/extension/decoder_transformation.cpp index 4533fb89d85651..940131479ca9e0 100644 --- a/src/frontends/common/src/extension/decoder_transformation.cpp +++ b/src/frontends/common/src/extension/decoder_transformation.cpp @@ -12,6 +12,7 @@ using namespace ov::frontend; 
/// \brief Helper class to register user function as a FunctionPass class CustomModelPass : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("frontend::CustomModelPass"); explicit CustomModelPass(std::function)> pass) : m_pass(std::move(pass)) {} bool run_on_model(const std::shared_ptr& f) override { diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.hpp b/src/frontends/pytorch/src/transforms/dict_resolver.hpp index 150b1361dab57d..b2830cecb51bdc 100644 --- a/src/frontends/pytorch/src/transforms/dict_resolver.hpp +++ b/src/frontends/pytorch/src/transforms/dict_resolver.hpp @@ -15,13 +15,13 @@ namespace pass { // This transformation replaces pattern Parameter(Dict)->aten::__getitem__ class DictParameterResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DictParameterResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DictParameterResolver"); bool run_on_model(const std::shared_ptr& model) override; }; // This transformation replaces pattern prim::DictConstruct->Result class DictResultResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DictResultResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DictResultResolver"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp index 31a025d6d90493..cbe8e1eb0b62c9 100644 --- a/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class DecomposeListTupleResults : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeListTupleResults"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DecomposeListTupleResults"); bool 
run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp index 2616b66fb58ab6..5d339a307033ef 100644 --- a/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp @@ -27,7 +27,7 @@ namespace pass { // tuples only and the most nested objects in those tuples are tensors. class DecomposeUnpackParameters : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeUnpackParameters"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DecomposeUnpackParameters"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp index 625b986f3b64b7..2aec2824bbddd3 100644 --- a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp @@ -20,7 +20,7 @@ class PrimTupleUnpackReplacer : public ov::pass::MatcherPass { class TupleUnpackInBodyReplacer : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::TupleUnpackInBodyReplacer"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::TupleUnpackInBodyReplacer"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp b/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp index 6038005e2eb4c4..d9eb34fb58d81b 100644 --- a/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp +++ b/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp @@ -20,7 +20,7 @@ namespace pass { // Merge nodes can have the same eliminated markers that means the fused If will have several 
outputs. class SwitchMergeResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::SwitchMergeResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow::SwitchMergeResolver"); SwitchMergeResolver() = default; bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp b/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp index fc4d1c36fac3af..016f7b994dacb3 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp @@ -17,7 +17,7 @@ namespace pass { // We need to remove them because separate sub-graphs can solidly affect performance class ConstToResultRemover : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::UnsupportedConstToResultRemover"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow::pass::UnsupportedConstToResultRemover"); ConstToResultRemover() {} bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp b/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp index c7d457d8d6a361..fcc493458ccbba 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp @@ -15,7 +15,7 @@ namespace pass { // Results marked as unused by Saved Model settings class SavedModelUnusedRemover : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::SavedModelUnusedRemover"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow::pass::SavedModelUnusedRemover"); SavedModelUnusedRemover() {} bool run_on_model(const std::shared_ptr& m) override; diff 
--git a/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp b/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp index 584e8c55b6a9ea..84cdf44cbf5b02 100644 --- a/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp +++ b/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp @@ -32,7 +32,7 @@ class TFLQuantizeReplacer : public ov::pass::MatcherPass { // This transformation simplifies type manipulations in the graph class TFLQuantizeResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeResolver"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp index 2b17039d198bf3..8ca0424ccda030 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp @@ -16,7 +16,7 @@ namespace intel_cpu { */ class SnippetsMarkSkipped : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SnippetsMarkSkipped", "0"); + OPENVINO_MODEL_PASS_RTTI("SnippetsMarkSkipped"); SnippetsMarkSkipped() : ModelPass() {} bool run_on_model(const std::shared_ptr&) override; }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp index 05f8d1206715f0..24e848cf157e0e 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp @@ -15,7 +15,7 @@ namespace 
pass { class EnforcePrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("EnforcePrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("EnforcePrecision"); EnforcePrecision(const element::Type source, const element::Type target, diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp index fc5250defac8cb..f71a376ee1f038 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp @@ -16,7 +16,7 @@ namespace intel_cpu { */ class SnippetsMarkSkipped : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SnippetsMarkSkipped", "0"); + OPENVINO_MODEL_PASS_RTTI("SnippetsMarkSkipped"); SnippetsMarkSkipped(bool enableBF16 = false) : ModelPass(), enableBF16(enableBF16) {} bool run_on_model(const std::shared_ptr&) override; diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp index 9a00f97d9c464d..a99330845d443d 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp @@ -18,7 +18,7 @@ namespace pass { */ class FuseTPPToEquations: public ov::pass::ModelPass { public: - OPENVINO_RTTI("FuseTPPToEquations", "0"); + OPENVINO_MODEL_PASS_RTTI("FuseTPPToEquations"); FuseTPPToEquations() = default; bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.hpp index 004bd4fad561a8..1cc30180db7142 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.hpp +++ 
b/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class ConvertConvolutionToInternal : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertConvolutionToInternal", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertConvolutionToInternal"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp b/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp index 079028f0154848..77922903c287bd 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class PrintModelStatistics : public ov::pass::ModelPass { public: - OPENVINO_RTTI("PrintModelStatistics", "0"); + OPENVINO_MODEL_PASS_RTTI("PrintModelStatistics"); PrintModelStatistics() = default; bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp index dbd945bc80a45b..461136bc08aaa8 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp @@ -127,6 +127,7 @@ namespace ov { namespace pass { class InjectionPass : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("InjectionPass"); using injection_callback = std::function)>; explicit InjectionPass(injection_callback callback) : ModelPass(), m_callback(std::move(callback)) {} @@ -268,6 +269,7 @@ class InitUniqueNames : public ov::pass::ModelPass { UniqueNamesHolder::Ptr m_unh; public: + OPENVINO_MODEL_PASS_RTTI("InitUniqueNames"); InitUniqueNames(UniqueNamesHolder::Ptr unh) : m_unh(unh) {} bool run_on_model(const 
std::shared_ptr& f) override { m_unh->init_names(f); @@ -279,6 +281,7 @@ class CheckUniqueNames : public ov::pass::ModelPass { UniqueNamesHolder::Ptr m_unh; public: + OPENVINO_MODEL_PASS_RTTI("CheckUniqueNames"); CheckUniqueNames(UniqueNamesHolder::Ptr unh, bool soft_names_comparison = false, bool result_friendly_names_check = true) diff --git a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp index d781d92b57052a..91ef3fd6a7ebe1 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp @@ -17,6 +17,7 @@ namespace pass { class CopyTensorNamesToRefModel : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("CopyTensorNamesToRefModel"); CopyTensorNamesToRefModel(const std::shared_ptr& ref_model) : m_ref_model(ref_model) {} bool run_on_model(const std::shared_ptr& f) override { const auto& orig_results = f->get_results(); From 13d60b1f0b4f40121bd7e58adf280258fcdf5175 Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Thu, 19 Dec 2024 20:37:59 +0100 Subject: [PATCH 31/60] [STFT][CPU] Improve performance of STFT for CPU by reusage RDFT jit Executor (#26967) ### Details: - Improve performance of STFT for CPU plugin by reusage RDFT jit Executor - Use parallel loops in stft - No changes in the logic of the existing RDFT executor, RDFTExecutor::build function has been added to keep the RDFT details hidden in cpp as is. 
- Perf numbers collected within the ticket ### Tickets: - 156115 --------- Co-authored-by: Michal Lukaszewski --- src/plugins/intel_cpu/src/nodes/rdft.cpp | 41 ++++---- src/plugins/intel_cpu/src/nodes/rdft.h | 17 ++++ src/plugins/intel_cpu/src/nodes/stft.cpp | 114 +++++++++++++++++++++-- src/plugins/intel_cpu/src/nodes/stft.h | 4 + 4 files changed, 149 insertions(+), 27 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/rdft.cpp b/src/plugins/intel_cpu/src/nodes/rdft.cpp index 5220f037788123..d39aa9e23343fe 100644 --- a/src/plugins/intel_cpu/src/nodes/rdft.cpp +++ b/src/plugins/intel_cpu/src/nodes/rdft.cpp @@ -838,17 +838,20 @@ struct RDFTJitExecutor : public RDFTExecutor { rdftKernel.reset(new jit_dft_kernel_f32(isInverse, rdftType)); dftKernel.reset(new jit_dft_kernel_f32(isInverse, complex_to_complex)); vlen = cpu_isa_traits::vlen; - primDesc->setImplementationType(jit_avx512); + if (primDesc) + primDesc->setImplementationType(jit_avx512); } else if (mayiuse(cpu::x64::avx2)) { rdftKernel.reset(new jit_dft_kernel_f32(isInverse, rdftType)); dftKernel.reset(new jit_dft_kernel_f32(isInverse, complex_to_complex)); vlen = cpu_isa_traits::vlen; - primDesc->setImplementationType(jit_avx2); + if (primDesc) + primDesc->setImplementationType(jit_avx2); } else if (mayiuse(cpu::x64::sse41)) { rdftKernel.reset(new jit_dft_kernel_f32(isInverse, rdftType)); dftKernel.reset(new jit_dft_kernel_f32(isInverse, complex_to_complex)); vlen = cpu_isa_traits::vlen; - primDesc->setImplementationType(jit_sse42); + if (primDesc) + primDesc->setImplementationType(jit_sse42); } else { OPENVINO_THROW("Can't create RDFT kernel"); } @@ -1075,22 +1078,6 @@ struct RDFTRefExecutor : public RDFTExecutor { } }; -struct RDFTKey { - bool isInverse; - - size_t hash() const { - using namespace dnnl::impl::primitive_hashing; - - size_t seed = 0; - seed = hash_combine(seed, isInverse); - return seed; - } - - bool operator==(const RDFTKey& rhs) const { - return isInverse == rhs.isInverse; - } -}; - 
void RDFT::createPrimitive() { RDFTKey key{}; key.isInverse = inverse; @@ -1115,6 +1102,22 @@ void RDFT::createPrimitive() { Node::createPrimitive(); } + +std::shared_ptr RDFTExecutor::build(bool inverse, NodeDesc* primDesc) { + std::shared_ptr executor; +#if defined(OPENVINO_ARCH_X86_64) + using namespace dnnl::impl; + using namespace dnnl::impl::cpu::x64; + if (mayiuse(cpu::x64::sse41)) { + executor = std::make_shared(inverse, primDesc); + return executor; + } +#endif + executor = std::make_shared(inverse); + primDesc->setImplementationType(ref_any); + return executor; +} + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/rdft.h b/src/plugins/intel_cpu/src/nodes/rdft.h index fccd6500a50f7c..0de2fa0095df81 100644 --- a/src/plugins/intel_cpu/src/nodes/rdft.h +++ b/src/plugins/intel_cpu/src/nodes/rdft.h @@ -4,6 +4,7 @@ #pragma once +#include "common/primitive_hashing_utils.hpp" #include "kernels/x64/rdft_kernel.hpp" #include "node.h" @@ -30,6 +31,8 @@ struct RDFTExecutor { const std::vector& outputShape, const std::vector& axes); + static std::shared_ptr build(bool inverse, NodeDesc* primDesc = nullptr); + protected: bool isInverse; @@ -125,6 +128,20 @@ class RDFT : public Node { bool isSignalSizesConstant = false; }; +struct RDFTKey { + bool isInverse; + + size_t hash() const { + size_t seed = 0; + seed = dnnl::impl::hash_combine(seed, isInverse); + return seed; + } + + bool operator==(const RDFTKey& rhs) const { + return isInverse == rhs.isInverse; + } +}; + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/stft.cpp b/src/plugins/intel_cpu/src/nodes/stft.cpp index 47855a7eff7399..31f3b673f38841 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.cpp +++ b/src/plugins/intel_cpu/src/nodes/stft.cpp @@ -4,6 +4,10 @@ #include "stft.h" +#include "cpu/x64/cpu_isa_traits.hpp" +#include "cpu/x64/jit_generator.hpp" +#include "nodes/common/cpu_memcpy.h" 
+#include "openvino/core/parallel.hpp" #include "openvino/core/type.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/stft.hpp" @@ -73,15 +77,95 @@ bool STFT::created() const { return getType() == Type::STFT; } +namespace { +static void transpose_out4d(const uint8_t* in, + uint8_t* out, + const VectorDims& in_shape, + const VectorDims& out_shape, + size_t elem_size) { + const std::vector axes_order{0, 2, 1, 3}; + parallel_for3d(out_shape[0], + out_shape[1], + out_shape[2], + [in, out, axes_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k) { + size_t in_indexes[3]; + in_indexes[axes_order[0]] = i; + in_indexes[axes_order[1]] = j; + in_indexes[axes_order[2]] = k; + size_t in_off = + ((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3]; + size_t out_off = ((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3]; + cpu_memcpy(out + out_off * elem_size, in + in_off * elem_size, out_shape[3] * elem_size); + }); +} +} // namespace + void STFT::execute(dnnl::stream strm) { - ov::reference::stft(getSrcDataAtPortAs(DATA_IDX), - getSrcDataAtPortAs(WINDOW_IDX), - getDstDataAtPortAs(0), - ov::Shape{getSrcMemoryAtPort(DATA_IDX)->getStaticDims()}, - ov::Shape{getSrcMemoryAtPort(WINDOW_IDX)->getStaticDims()}, - (getSrcDataAtPortAs(FRAME_SIZE_IDX))[0], - (getSrcDataAtPortAs(FRAME_STEP_IDX))[0], - m_transpose_frames); + const float* signal = getSrcDataAtPortAs(DATA_IDX); + const float* window = getSrcDataAtPortAs(WINDOW_IDX); + float* rdft_result = getDstDataAtPortAs(0); + const VectorDims& signal_shape = getSrcMemoryAtPort(DATA_IDX)->getStaticDims(); + const VectorDims& window_shape = getSrcMemoryAtPort(WINDOW_IDX)->getStaticDims(); + const int64_t frame_size = (getSrcDataAtPortAs(FRAME_SIZE_IDX))[0]; + const int64_t frame_step = (getSrcDataAtPortAs(FRAME_STEP_IDX))[0]; + + const auto is_signal_1D = signal_shape.size() == 1; + const size_t batch_size = is_signal_1D ? 
1 : signal_shape[0]; + const size_t signal_axis = is_signal_1D ? 0 : 1; + const auto signal_length = signal_shape[signal_axis]; + const auto num_frames = static_cast((signal_length - frame_size) / frame_step) + 1; + const auto frame_size_dim = static_cast(frame_size); + const auto fft_out_shape = VectorDims{static_cast((frame_size_dim / 2) + 1), 2}; + const auto fft_out_shape_size = shape_size(fft_out_shape); + + const auto window_length = window_shape[0] < frame_size_dim ? window_shape[0] : frame_size_dim; + std::vector pad_window(frame_size, 0); + cpu_parallel_memcpy(pad_window.data() + (frame_size_dim - window_length) / 2, + window, + sizeof(float) * window_shape[0]); + + float* dst = rdft_result; + const auto stft_shape = VectorDims{batch_size, num_frames, fft_out_shape[0], fft_out_shape[1]}; + if (m_transpose_frames) { // Store intermediate results + MemoryPtr dst_mem = + getScratchPadMem(std::make_shared(ov::element::f32, Shape{stft_shape})); + dst = dst_mem->getDataAs(); + } + + parallel_for2d(batch_size, num_frames, [&](size_t batch, size_t frame_idx) { + size_t batch_in_start = batch * signal_length; + size_t batch_frames_out = batch * num_frames; + + const auto frame_start = batch_in_start + frame_idx * frame_step; + const auto frame_end = frame_start + frame_size; + std::vector signal_slice(signal + frame_start, signal + frame_end); + std::transform(signal_slice.begin(), + signal_slice.end(), + pad_window.begin(), + signal_slice.begin(), + std::multiplies()); + + const auto result_idx = (batch_frames_out + frame_idx) * fft_out_shape_size; + auto twiddles = rdft_executor->generateTwiddles({static_cast(signal_slice.size())}, fft_out_shape, {0}); + rdft_executor->execute(signal_slice.data(), + dst + result_idx, + twiddles, + 1, + {0}, + {static_cast(frame_size)}, + {frame_size_dim}, + fft_out_shape, + {1}, + {2, 1}); + }); + if (m_transpose_frames) { + const auto stft_transp_out_shape = VectorDims{batch_size, fft_out_shape[0], num_frames, fft_out_shape[1]}; 
+ transpose_out4d(reinterpret_cast(dst), + reinterpret_cast(rdft_result), + stft_shape, + stft_transp_out_shape, + sizeof(float)); + } } void STFT::executeDynamicImpl(dnnl::stream strm) { @@ -92,6 +176,20 @@ bool STFT::needShapeInfer() const { return !(m_is_frame_size_const && m_is_frame_step_const) || Node::needShapeInfer(); } +void STFT::createPrimitive() { + RDFTKey key{}; + key.isInverse = false; + auto buildExecutor = [&](const RDFTKey& key) -> std::shared_ptr { + return RDFTExecutor::build(key.isInverse, getSelectedPrimitiveDescriptor()); + }; + + auto cache = context->getParamsCache(); + auto result = cache->getOrCreate(key, buildExecutor); + rdft_executor = result.first; + + Node::createPrimitive(); +} + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/stft.h b/src/plugins/intel_cpu/src/nodes/stft.h index 7b1684cae4b674..608e14661910e2 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.h +++ b/src/plugins/intel_cpu/src/nodes/stft.h @@ -7,6 +7,7 @@ #include #include "node.h" +#include "rdft.h" namespace ov { namespace intel_cpu { @@ -21,6 +22,7 @@ class STFT : public Node { bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; bool needPrepareParams() const override; + void createPrimitive() override; void execute(dnnl::stream strm) override; void executeDynamicImpl(dnnl::stream strm) override; @@ -35,6 +37,8 @@ class STFT : public Node { /// STFT params bool m_transpose_frames = false; + // RDFT executor + std::shared_ptr rdft_executor = nullptr; bool m_is_frame_size_const = false; bool m_is_frame_step_const = false; From 541b3cfb818ff06decc15b6d9488740e0424d0bb Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Fri, 20 Dec 2024 09:25:07 +0400 Subject: [PATCH 32/60] [Snippets][AArch64] Enabled Ceiling and FloorMod tokenization (#28092) ### Details: - *The PR enabled Snippets tokenization for ops `Ceiling` (activation) and 
`FloorMod` (eltwise):* - *Updated tokenization callback* - *Registered the operations with the corresponding emitters in `CPUGenerator`* - *Added tests* - *This PR is example for the further GFIs'* ### Tickets: - *N/A* --- .../src/emitters/snippets/aarch64/cpu_generator.cpp | 2 ++ .../src/transformations/transformation_pipeline.cpp | 7 ++++--- .../custom/single_layer_tests/classes/activation.cpp | 1 + .../custom/single_layer_tests/classes/eltwise.cpp | 8 ++++---- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp index 95698f8ac78bb0..0f6b2c24c13df7 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp @@ -138,10 +138,12 @@ CPUTargetMachine::CPUTargetMachine(dnnl::impl::cpu::aarch64::cpu_isa_t host_isa) // unary jitters[ov::op::v0::Abs::get_type_info_static()] = CREATE_CPU_EMITTER(jit_abs_emitter); + jitters[ov::op::v0::Ceiling::get_type_info_static()] = CREATE_CPU_EMITTER(jit_ceiling_emitter); jitters[ov::op::v0::Clamp::get_type_info_static()] = CREATE_CPU_EMITTER(jit_clamp_emitter); jitters[ov::op::v0::Elu::get_type_info_static()] = CREATE_CPU_EMITTER(jit_elu_emitter); jitters[ov::op::v0::Exp::get_type_info_static()] = CREATE_CPU_EMITTER(jit_exp_emitter); jitters[ov::op::v0::Floor::get_type_info_static()] = CREATE_CPU_EMITTER(jit_floor_emitter); + jitters[ov::op::v1::FloorMod::get_type_info_static()] = CREATE_CPU_EMITTER(jit_floor_mod_emitter); jitters[ov::op::v0::Gelu::get_type_info_static()] = CREATE_CPU_EMITTER(jit_gelu_erf_emitter); jitters[ov::op::v7::Gelu::get_type_info_static()] = CREATE_GELU_V7_EMITTER(jit_gelu_erf_emitter, jit_gelu_tanh_emitter); diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp 
index 13e890f6339e81..a63377312ecb95 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -1117,9 +1117,10 @@ void Transformations::MainSnippets(void) { auto is_supported_op = [](const std::shared_ptr& n) -> bool { #if defined(OPENVINO_ARCH_ARM64) return (ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp index 57f098e1f234d2..0f63a7517b5745 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp @@ -267,6 +267,7 @@ const std::map>>& activat static const std::map>> activationTypes { {Abs, {{}}}, {Exp, {{}}}, + {Ceiling, {{}}}, {Clamp, {{-2.0f, 2.0f}}}, {Elu, {{0.1f}}}, {Floor, {{}}}, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp index 1696f35fc1bc4a..3f48b1f0b1e976 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp @@ -264,7 +264,8 @@ std::string EltwiseLayerCPUTest::getPrimitiveType(const utils::EltwiseTypes& elt return "jit"; } #endif - if (eltwise_type == utils::EltwiseTypes::MOD) { + if (eltwise_type == 
utils::EltwiseTypes::FLOOR_MOD || + eltwise_type == utils::EltwiseTypes::MOD) { return "ref"; } else { return "acl"; @@ -317,10 +318,8 @@ const std::vector& eltwiseOpTypesBinInp() { #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) utils::EltwiseTypes::SUBTRACT, // TODO: Fix CVS-105430 utils::EltwiseTypes::DIVIDE, // TODO: Fix CVS-105430 - utils::EltwiseTypes::FLOOR_MOD, // TODO: Fix CVS-111875 -#elif defined(OPENVINO_ARCH_ARM64) - utils::EltwiseTypes::FLOOR_MOD, #endif + utils::EltwiseTypes::FLOOR_MOD, utils::EltwiseTypes::SQUARED_DIFF, utils::EltwiseTypes::MOD, }; @@ -331,6 +330,7 @@ const std::vector& eltwiseOpTypesBinInpSnippets() { static const std::vector eltwiseOpTypesBinInp = { utils::EltwiseTypes::ADD, utils::EltwiseTypes::MULTIPLY, + utils::EltwiseTypes::FLOOR_MOD, utils::EltwiseTypes::MOD, }; return eltwiseOpTypesBinInp; From 921f0a9d452f2fd6ac88fcfdfbd09e49004f46e5 Mon Sep 17 00:00:00 2001 From: Vishniakov Nikolai Date: Fri, 20 Dec 2024 07:04:20 +0100 Subject: [PATCH 33/60] [OV JS] Update openvino-node package version to 2024.6.0 (#28148) ### Details: - update openvino-node package version to 2024.6.0 - update openvino-node to 2024.6.0 in samples --- samples/js/node/package-lock.json | 8 ++++---- samples/js/node/package.json | 2 +- src/bindings/js/node/package-lock.json | 4 ++-- src/bindings/js/node/package.json | 3 +-- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/samples/js/node/package-lock.json b/samples/js/node/package-lock.json index 020cec71ea3103..eb7369b10a7578 100644 --- a/samples/js/node/package-lock.json +++ b/samples/js/node/package-lock.json @@ -15,7 +15,7 @@ "args": "^5.0.3", "eslint": "^8.39.0", "https-proxy-agent": "^7.0.2", - "openvino-node": "^2024.5.0-0" + "openvino-node": "^2024.6.0" }, "engines": { "node": ">=21.0.0" @@ -1920,9 +1920,9 @@ } }, "node_modules/openvino-node": { - "version": "2024.5.0-0", - "resolved": "https://registry.npmjs.org/openvino-node/-/openvino-node-2024.5.0-0.tgz", - 
"integrity": "sha512-SgvHH3OdOXyMu5iZx0oBFWn7yIu3uB54IIfmXFKlyhHbSjO+3ph+DauUdlUkp2DGETR7bzq7+cPyyroeOF7qqQ==", + "version": "2024.6.0", + "resolved": "https://registry.npmjs.org/openvino-node/-/openvino-node-2024.6.0.tgz", + "integrity": "sha512-EQ0kdklsac3rfJTv6jUc9UIR0IG/YyIMOeq40+EYS0wozQ0mp4aQGBJRsT30SaEM4Ct797F9Mq+v9PjHxlJvcw==", "dev": true, "hasInstallScript": true, "license": "Apache-2.0", diff --git a/samples/js/node/package.json b/samples/js/node/package.json index b3e12a265f0c77..8198d13c80e6a5 100644 --- a/samples/js/node/package.json +++ b/samples/js/node/package.json @@ -8,7 +8,7 @@ "args": "^5.0.3", "eslint": "^8.39.0", "https-proxy-agent": "^7.0.2", - "openvino-node": "^2024.5.0-0", + "openvino-node": "^2024.6.0", "@napi-rs/canvas": "^0.1.59" }, "scripts": { diff --git a/src/bindings/js/node/package-lock.json b/src/bindings/js/node/package-lock.json index 27f426968e5b54..c202a824c37556 100644 --- a/src/bindings/js/node/package-lock.json +++ b/src/bindings/js/node/package-lock.json @@ -1,12 +1,12 @@ { "name": "openvino-node", - "version": "2024.5.0-0", + "version": "2024.6.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "openvino-node", - "version": "2024.5.0-0", + "version": "2024.6.0", "hasInstallScript": true, "license": "Apache-2.0", "os": [ diff --git a/src/bindings/js/node/package.json b/src/bindings/js/node/package.json index c0e4e03ddc4df6..0dab709718ae9b 100644 --- a/src/bindings/js/node/package.json +++ b/src/bindings/js/node/package.json @@ -1,6 +1,6 @@ { "name": "openvino-node", - "version": "2024.5.0-0", + "version": "2024.6.0", "description": "OpenVINO™ utils for using from Node.js environment", "repository": { "url": "git+https://github.com/openvinotoolkit/openvino.git", @@ -44,7 +44,6 @@ "tar-fs": "^3.0.4" }, "binary": { - "version": "2024.5.0", "module_path": "./bin/", "remote_path": "./repositories/openvino/nodejs_bindings/{version}/{platform}/", "package_name": 
"openvino_nodejs_bindings_{platform}_{version}_{arch}.tar.gz", From 71c8b8585394b732b4b26896582c59f784f896a8 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Fri, 20 Dec 2024 07:04:34 +0100 Subject: [PATCH 34/60] [RTTI] Apply OPENVINO_MATCHER_PASS_RTTI in transformations/op_convertions (#28150) ### Details: - Applied OPENVINO_MATCHER_PASS_RTTI helper macro in transformations/op_convertions. ### Tickets: - CVS-159567 --------- Signed-off-by: Tomasz Jankowski --- .../op_conversions/batch_norm_decomposition.hpp | 2 +- .../bidirectional_sequences_decomposition.hpp | 6 +++--- .../op_conversions/convert_avgpool_downgrade.hpp | 2 +- .../op_conversions/convert_batch_to_space.hpp | 2 +- .../convert_bitwise_to_logical_bool.hpp | 8 ++++---- .../op_conversions/convert_broadcast3.hpp | 2 +- .../op_conversions/convert_broadcast_to_tiles.hpp | 2 +- .../op_conversions/convert_convertlike.hpp | 2 +- .../op_conversions/convert_convertpromotetypes.hpp | 2 +- .../convert_deformable_conv_v8_to_v1.hpp | 2 +- .../op_conversions/convert_depth_to_space.hpp | 2 +- .../op_conversions/convert_divide.hpp | 4 ++-- .../convert_embedding_bag_offsets15_downgrade.hpp | 2 +- .../convert_embedding_bag_packed15_downgrade.hpp | 2 +- .../op_conversions/convert_fc_to_compressed.hpp | 3 ++- .../convert_fc_to_quantized_legacy.hpp | 2 +- .../op_conversions/convert_gather_0d.hpp | 2 +- .../op_conversions/convert_gather_downgrade.hpp | 4 ++-- .../convert_gather_to_compressed.hpp | 4 ++-- .../op_conversions/convert_gather_upgrade.hpp | 4 ++-- .../op_conversions/convert_gelu.hpp | 2 +- .../convert_gp9_to_gp_ie_internal.hpp | 2 +- .../convert_interpolate11_downgrade.hpp | 2 +- .../convert_interpolate1_to_interpolate4.hpp | 2 +- .../convert_matrix_nms_to_matrix_nms_ie.hpp | 2 +- .../op_conversions/convert_maxpool_downgrade.hpp | 4 ++-- .../op_conversions/convert_maxpool_upgrade.hpp | 2 +- .../convert_minimum_to_power_and_max.hpp | 2 +- .../transformations/op_conversions/convert_mod.hpp | 2 +- 
...convert_multiclass_nms_to_multiclass_nms_ie.hpp | 2 +- .../convert_multiclass_nms_upgrade.hpp | 2 +- .../op_conversions/convert_mvn1_to_mvn6.hpp | 2 +- .../op_conversions/convert_negative.hpp | 2 +- .../convert_nms9_to_nms_ie_internal.hpp | 2 +- .../convert_nms_rotated_to_nms_ie_internal.hpp | 2 +- .../convert_nms_to_nms_ie_internal.hpp | 2 +- .../op_conversions/convert_pad12_downgrade.hpp | 2 +- .../op_conversions/convert_pad_to_group_conv.hpp | 2 +- .../convert_previous_nms_to_nms_5.hpp | 6 +++--- .../convert_previous_nms_to_nms_9.hpp | 8 ++++---- .../op_conversions/convert_prior_box_v8_to_v0.hpp | 2 +- .../op_conversions/convert_reduce_to_pooling.hpp | 2 ++ .../op_conversions/convert_reduce_to_reshape.hpp | 2 ++ .../op_conversions/convert_roi_align_v3_to_v9.hpp | 2 +- .../op_conversions/convert_roi_align_v9_to_v3.hpp | 2 +- .../convert_scatter_elements_to_scatter.hpp | 2 +- ...convert_scatter_elements_update12_downgrade.hpp | 2 +- .../convert_scatter_nd_update15_downgrade.hpp | 2 +- .../convert_sequences_to_tensor_iterator.hpp | 6 +++--- .../op_conversions/convert_shapeof3.hpp | 2 +- .../op_conversions/convert_shuffle_channels3.hpp | 2 +- .../convert_slice_to_strided_slice.hpp | 2 +- .../op_conversions/convert_slicescatter.hpp | 2 +- .../op_conversions/convert_softmax_downgrade.hpp | 2 +- .../op_conversions/convert_softmax_upgrade.hpp | 2 +- .../op_conversions/convert_space_to_batch.hpp | 2 +- .../op_conversions/convert_space_to_depth.hpp | 2 +- .../op_conversions/convert_squeeze15_downgrade.hpp | 2 +- .../op_conversions/convert_subtract.hpp | 4 ++-- .../op_conversions/convert_ti_to_sequences.hpp | 14 +++++++------- .../op_conversions/convert_topk11_downgrade.hpp | 2 +- .../op_conversions/convert_topk3.hpp | 2 +- .../op_conversions/convert_xor_to_logical_xor.hpp | 2 +- .../op_conversions/detection_output_downgrade.hpp | 2 +- .../op_conversions/detection_output_upgrade.hpp | 2 +- .../op_conversions/einsum_decomposition.hpp | 2 +- 
.../op_conversions/eye_decomposition.hpp | 2 +- .../op_conversions/fq_decomposition.hpp | 2 +- .../op_conversions/gelu7_downgrade.hpp | 2 +- .../group_normalization_decomposition.hpp | 2 +- .../op_conversions/gru_cell_decomposition.hpp | 2 +- .../op_conversions/hard_sigmoid_decomposition.hpp | 2 +- .../op_conversions/hsigmoid_decomposition.hpp | 2 +- .../op_conversions/hswish_decomposition.hpp | 2 +- .../op_conversions/log_softmax_decomposition.hpp | 2 +- .../op_conversions/lstm_cell_decomposition.hpp | 2 +- .../op_conversions/mvn6_decomposition.hpp | 2 +- .../op_conversions/normalize_l2_decomposition.hpp | 2 +- .../op_conversions/reduce_l1_decomposition.hpp | 2 +- .../op_conversions/reduce_l2_decomposition.hpp | 2 +- .../op_conversions/rnn_cell_decomposition.hpp | 2 +- .../scaled_dot_product_attention_decomposition.hpp | 2 +- .../simplify_ctc_greedy_decoder_seq_len.hpp | 2 +- .../op_conversions/softmax_decomposition.hpp | 2 +- .../op_conversions/softplus_decomposition.hpp | 2 +- .../op_conversions/softsign_decomposition.hpp | 2 +- .../op_conversions/unique_decomposition.hpp | 2 +- 87 files changed, 114 insertions(+), 109 deletions(-) diff --git a/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp index 9f4399804c50ff..362b946554e17d 100644 --- a/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API BatchNormDecomposition; class ov::pass::BatchNormDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BatchNormDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BatchNormDecomposition"); BatchNormDecomposition(); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp index 2a1234ef7c041e..f74f08a9b8c061 100644 --- a/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp @@ -31,7 +31,7 @@ class TRANSFORMATIONS_API BidirectionalRNNSequenceDecomposition; class ov::pass::BidirectionalLSTMSequenceDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BidirectionalLSTMSequenceDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BidirectionalLSTMSequenceDecomposition"); BidirectionalLSTMSequenceDecomposition(); }; @@ -43,7 +43,7 @@ class ov::pass::BidirectionalLSTMSequenceDecomposition : public ov::pass::Matche class ov::pass::BidirectionalGRUSequenceDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BidirectionalGRUSequenceDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BidirectionalGRUSequenceDecomposition"); BidirectionalGRUSequenceDecomposition(); }; @@ -55,7 +55,7 @@ class ov::pass::BidirectionalGRUSequenceDecomposition : public ov::pass::Matcher class ov::pass::BidirectionalRNNSequenceDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BidirectionalRNNSequenceDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BidirectionalRNNSequenceDecomposition"); BidirectionalRNNSequenceDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp index 40f4e32b60af7e..1db9706fb8c776 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertAvgPool14ToAvgPool1 : public MatcherPass { public: - OPENVINO_RTTI("ConvertAvgPool14ToAvgPool1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertAvgPool14ToAvgPool1"); ConvertAvgPool14ToAvgPool1(); }; } // namespace pass diff --git a/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp b/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp index f2792b467a96e0..2ecc2b8f9ced75 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp @@ -33,7 +33,7 @@ class TRANSFORMATIONS_API ConvertBatchToSpace; class ov::pass::ConvertBatchToSpace : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBatchToSpace", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBatchToSpace"); explicit ConvertBatchToSpace(bool convert_by_elements = true) : MatcherPass() { if (convert_by_elements) convert_batch_to_space_by_elements(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp index 897bf9ea70fac0..a5e130e2389af2 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp @@ -18,22 +18,22 @@ class TRANSFORMATIONS_API ConvertBitwiseXorToLogicalXor; class ov::pass::ConvertBitwiseAndToLogicalAnd : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseAndToLogicalAnd", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseAndToLogicalAnd"); 
ConvertBitwiseAndToLogicalAnd(); }; class ov::pass::ConvertBitwiseNotToLogicalNot : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseNotToLogicalNot", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseNotToLogicalNot"); ConvertBitwiseNotToLogicalNot(); }; class ov::pass::ConvertBitwiseOrToLogicalOr : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseOrToLogicalOr", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseOrToLogicalOr"); ConvertBitwiseOrToLogicalOr(); }; class ov::pass::ConvertBitwiseXorToLogicalXor : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseXorToLogicalXor", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseXorToLogicalXor"); ConvertBitwiseXorToLogicalXor(); }; /** diff --git a/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp index 06687b9e07ba01..7518f26f5d0cbc 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertBroadcast3; class ov::pass::ConvertBroadcast3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBroadcast3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBroadcast3"); ConvertBroadcast3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp b/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp index 5d5934b33e8216..5c1f374bcb724c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertBroadcastToTiles; class 
ov::pass::ConvertBroadcastToTiles : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBroadcastToTiles", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBroadcastToTiles"); ConvertBroadcastToTiles(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp index dfe5e5e7424d90..5952fc114b76fd 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertConvertLike; class ov::pass::ConvertConvertLike : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertConvertLike", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertConvertLike"); ConvertConvertLike(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp b/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp index c4d95f1211bea5..bb6a593b588ffc 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API ConvertConvertPromoteTypes; /// element type. 
class ov::pass::ConvertConvertPromoteTypes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertConvertPromoteTypes", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertConvertPromoteTypes"); ConvertConvertPromoteTypes(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp b/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp index 7db239e18d265a..37cf1935d85fbe 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertDeformableConv8To1; */ class ov::pass::ConvertDeformableConv8To1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDeformableConv8To1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDeformableConv8To1"); ConvertDeformableConv8To1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp b/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp index 481006b7a05822..dc6124c4f6be2e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertDepthToSpace; class ov::pass::ConvertDepthToSpace : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDepthToSpace", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDepthToSpace"); ConvertDepthToSpace(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp b/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp index 66442bbdc123da..e0526bfd815745 100644 --- 
a/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp @@ -21,12 +21,12 @@ class TRANSFORMATIONS_API ConvertDivideWithConstant; class ov::pass::ConvertDivide : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDivide", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDivide"); ConvertDivide(); }; class ov::pass::ConvertDivideWithConstant : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDivideWithConstant", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDivideWithConstant"); ConvertDivideWithConstant(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp index 6ddbff4b7991b6..cbe39419438c52 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3"); ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp index a925928a28a3d8..eb8b5ddf93435c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3"); ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp index 1b6fcfb2bb3684..388de0610ce4fa 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp @@ -18,10 +18,11 @@ class TRANSFORMATIONS_API ConvertFullyConnectedToFullyConnectedCompressed; class ov::pass::ConvertFullyConnectedToFullyConnectedCompressed : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("ConvertFullyConnectedToFullyConnectedCompressed"); + using SupportsPredicate = std::function&, size_t, size_t, size_t)>; - OPENVINO_RTTI("ConvertFullyConnectedToFullyConnectedCompressed", "0"); ConvertFullyConnectedToFullyConnectedCompressed(const std::vector& supported_activation_types, const std::vector& supported_weights_types, SupportsPredicate supports_config = nullptr, diff --git a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp index 88990f92cb573c..b0f86055a4da17 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertFCToFCQuantizedLegacy; class ov::pass::ConvertFCToFCQuantizedLegacy : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertFullyConnectedToFullyConnectedQuantized", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertFCToFCQuantizedLegacy"); ConvertFCToFCQuantizedLegacy(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp index 75f9dd967d48c8..9a1798e4319fe1 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ConvertGather0D; */ class ov::pass::ConvertGather0D : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather0D", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather0D"); ConvertGather0D(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp index dfdbed915679b2..966175079ad30d 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp @@ -22,7 +22,7 @@ class TRANSFORMATIONS_API ConvertGather8ToGather7; */ class ov::pass::ConvertGather7ToGather1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather7ToGather1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather7ToGather1"); ConvertGather7ToGather1(); }; @@ -32,6 +32,6 @@ class ov::pass::ConvertGather7ToGather1 : public ov::pass::MatcherPass { */ class ov::pass::ConvertGather8ToGather7 : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather8ToGather7", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather8ToGather7"); ConvertGather8ToGather7(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp index a916e9a4b91a44..edfdd3d5a07146 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertGatherToGatherCompressed; class ov::pass::ConvertGatherToGatherCompressed : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGatherToGatherCompressed", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGatherToGatherCompressed"); ConvertGatherToGatherCompressed(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp index ec8f8be61c3015..1c04190f4d7d0f 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp @@ -23,7 +23,7 @@ class TRANSFORMATIONS_API ConvertGather7ToGather8; */ class ov::pass::ConvertGather1ToGather7 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather1ToGather7", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather1ToGather7"); ConvertGather1ToGather7(); }; @@ -33,6 +33,6 @@ class ov::pass::ConvertGather1ToGather7 : public ov::pass::MatcherPass { */ class ov::pass::ConvertGather7ToGather8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather7ToGather8", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("ConvertGather7ToGather8"); ConvertGather7ToGather8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp index 498872814f9cbb..dd9334381b3d8e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertGELU; class ov::pass::ConvertGELU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGELU", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGELU"); ConvertGELU(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp index 9fe62aad7fbd8c..a2c82137387172 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertGP9ToGPIEInternal; class ov::pass::ConvertGP9ToGPIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGP9ToGPIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGP9ToGPIEInternal"); ConvertGP9ToGPIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp index 8c136aa918f5f0..edca5bee3f215b 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp @@ -16,7 +16,7 @@ namespace pass { */ class 
TRANSFORMATIONS_API ConvertInterpolate11ToInterpolate4 : public MatcherPass { public: - OPENVINO_RTTI("ConvertInterpolate11ToInterpolate4", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertInterpolate11ToInterpolate4"); ConvertInterpolate11ToInterpolate4(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp b/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp index f3b07c36962ccd..48822472ecbfd9 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ConvertInterpolate1ToInterpolate4; */ class ov::pass::ConvertInterpolate1ToInterpolate4 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertInterpolate1ToInterpolate4", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertInterpolate1ToInterpolate4"); ConvertInterpolate1ToInterpolate4(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp b/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp index 6d414139ad6f57..ea52625c9df1ae 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertMatrixNmsToMatrixNmsIE; class ov::pass::ConvertMatrixNmsToMatrixNmsIE : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMatrixNmsToMatrixNmsIE", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMatrixNmsToMatrixNmsIE"); ConvertMatrixNmsToMatrixNmsIE(bool force_i32_output_type = true); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp index 953cbab8a801a7..fc778773825c5c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp @@ -22,7 +22,7 @@ class TRANSFORMATIONS_API ConvertMaxPool14ToMaxPool8; */ class ov::pass::ConvertMaxPool8ToMaxPool1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMaxPool8ToMaxPool1"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMaxPool8ToMaxPool1"); ConvertMaxPool8ToMaxPool1(); }; @@ -32,6 +32,6 @@ class ov::pass::ConvertMaxPool8ToMaxPool1 : public ov::pass::MatcherPass { */ class ov::pass::ConvertMaxPool14ToMaxPool8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMaxPool14ToMaxPool8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMaxPool14ToMaxPool8"); ConvertMaxPool14ToMaxPool8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp index 6e7eed21342584..538b04bc9e254c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertMaxPool1ToMaxPool8; class ov::pass::ConvertMaxPool1ToMaxPool8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMaxPool1ToMaxPool8"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMaxPool1ToMaxPool8"); ConvertMaxPool1ToMaxPool8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp 
b/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp index d092ffec29d8c4..95e9a8e4f171db 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertMinimum; class ov::pass::ConvertMinimum : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMinimum", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMinimum"); ConvertMinimum(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp b/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp index 0fbd3bba723ecb..8c6cedbd67d635 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertMod; class ov::pass::ConvertMod : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMod", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMod"); ConvertMod(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp index 361d2352928b7e..8f003a05f874dd 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertMulticlassNmsToMulticlassNmsIE; class ov::pass::ConvertMulticlassNmsToMulticlassNmsIE : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("public", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("ConvertMulticlassNmsToMulticlassNmsIE"); ConvertMulticlassNmsToMulticlassNmsIE(bool force_i32_output_type = true); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp index 1f3e9e9b5caf08..425ec26e78ccad 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertMulticlassNms8ToMulticlassNms9; class ov::pass::ConvertMulticlassNms8ToMulticlassNms9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMulticlassNms8ToMulticlassNms9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMulticlassNms8ToMulticlassNms9"); ConvertMulticlassNms8ToMulticlassNms9(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp b/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp index d958d166f7270d..9af05fa1f05891 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertMVN1ToMVN6; */ class ov::pass::ConvertMVN1ToMVN6 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMVN1ToMVN6", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMVN1ToMVN6"); ConvertMVN1ToMVN6(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp b/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp index b985f4bfe7f639..71df6767812c35 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp 
+++ b/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertNegative; class ov::pass::ConvertNegative : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNegative", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNegative"); ConvertNegative(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp index fee970b6b44bfc..5bfd769e122e6b 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertNMS9ToNMSIEInternal; class ov::pass::ConvertNMS9ToNMSIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS9ToNMSIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS9ToNMSIEInternal"); ConvertNMS9ToNMSIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp index dcee03e513b38e..bf06c81e08e197 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertNMSRotatedToNMSIEInternal; class ov::pass::ConvertNMSRotatedToNMSIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMSRotatedToNMSIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMSRotatedToNMSIEInternal"); ConvertNMSRotatedToNMSIEInternal(); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp index ca205fe9078f7a..e8e34ce249d241 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertNMSToNMSIEInternal; class ov::pass::ConvertNMSToNMSIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMSToNMSIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMSToNMSIEInternal"); ConvertNMSToNMSIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp index 36ca9112b07829..263c71fb83dc76 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertPad12ToPad1 : public MatcherPass { public: - OPENVINO_RTTI("ConvertPad12ToPad1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertPad12ToPad1"); ConvertPad12ToPad1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp b/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp index a89386bd7048cb..7c3f44a439ee6e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp @@ -30,6 +30,6 @@ class TRANSFORMATIONS_API ConvertPadToGroupConvolution; class 
ov::pass::ConvertPadToGroupConvolution : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertPadToGroupConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertPadToGroupConvolution"); ConvertPadToGroupConvolution(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp index 8a4c0ee3110c1b..bc0d79220a5474 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp @@ -22,18 +22,18 @@ class TRANSFORMATIONS_API ConvertNMS4ToNMS5; class ov::pass::ConvertNMS1ToNMS5 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS1ToNMS5", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS1ToNMS5"); ConvertNMS1ToNMS5(); }; class ov::pass::ConvertNMS3ToNMS5 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS3ToNMS5", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS3ToNMS5"); ConvertNMS3ToNMS5(); }; class ov::pass::ConvertNMS4ToNMS5 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS4ToNMS5", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS4ToNMS5"); ConvertNMS4ToNMS5(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp index f77db7a03f8606..feff6577d6cb07 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp @@ -23,24 +23,24 @@ class TRANSFORMATIONS_API ConvertNMS5ToNMS9; class ov::pass::ConvertNMS1ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS1ToNMS9", "0"); 
+ OPENVINO_MATCHER_PASS_RTTI("ConvertNMS1ToNMS9"); ConvertNMS1ToNMS9(); }; class ov::pass::ConvertNMS3ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS3ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS3ToNMS9"); ConvertNMS3ToNMS9(); }; class ov::pass::ConvertNMS4ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS4ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS4ToNMS9"); ConvertNMS4ToNMS9(); }; class ov::pass::ConvertNMS5ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS5ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS5ToNMS9"); ConvertNMS5ToNMS9(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp b/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp index 2725d789a83a70..435a96b8e1cbc7 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertPriorBox8To0; */ class ov::pass::ConvertPriorBox8To0 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertPriorBox8To0", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertPriorBox8To0"); ConvertPriorBox8To0(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp index 662660b926aa52..36d2b052243382 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp @@ -35,6 +35,8 @@ class TRANSFORMATIONS_API ConvertReduceSumToPooling; class ConvertReduceBase : public ov::pass::MatcherPass { public: + 
OPENVINO_MATCHER_PASS_RTTI("ConvertReduceBase"); + template ov::matcher_pass_callback convert_reduce_to_pooling(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp index 15e303f0c26493..f020e768be2feb 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp @@ -32,6 +32,8 @@ class TRANSFORMATIONS_API ConvertReduceLogicalOrToReshape; class CvtReduceBase : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("CvtReduceBase"); + template ov::matcher_pass_callback convert_reduce_to_reshape(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp index 71f6becff0ba26..77c99e37b66533 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertROIAlign3To9; */ class ov::pass::ConvertROIAlign3To9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertROIAlign3To9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertROIAlign3To9"); ConvertROIAlign3To9(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp index d06dc424ff436e..11b9567e78eb3e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertROIAlign9To3; */ class ov::pass::ConvertROIAlign9To3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertROIAlign9To3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertROIAlign9To3"); ConvertROIAlign9To3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp index d0a738c14fab9c..f8a2eb828e97b9 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API ConvertScatterElementsToScatter; */ class ov::pass::ConvertScatterElementsToScatter : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertScatterElementsToScatter", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertScatterElementsToScatter"); ConvertScatterElementsToScatter(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp index 4af4e18c706e93..7e6d4613f109a8 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertScatterElementsUpdate12ToScatterElementsUpdate3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertScatterElementsUpdate12ToScatterElementsUpdate3", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("ConvertScatterElementsUpdate12ToScatterElementsUpdate3"); ConvertScatterElementsUpdate12ToScatterElementsUpdate3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp index dfaab66e22501c..4af9172e6351cb 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp @@ -16,7 +16,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertScatterNDUpdate15ToScatterNDUpdate3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertScatterNDUpdate15ToScatterNDUpdate3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertScatterNDUpdate15ToScatterNDUpdate3"); ConvertScatterNDUpdate15ToScatterNDUpdate3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp b/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp index 44502b42174de6..46a7e8ff0317e9 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp @@ -29,7 +29,7 @@ class TRANSFORMATIONS_API ConvertSequenceToTensorIterator; class ov::pass::ConvertRNNSequenceToTensorIterator : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertRNNSequenceToTensorIterator", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertRNNSequenceToTensorIterator"); ConvertRNNSequenceToTensorIterator(); }; @@ -41,7 +41,7 @@ class ov::pass::ConvertRNNSequenceToTensorIterator : public ov::pass::MatcherPas class ov::pass::ConvertGRUSequenceToTensorIterator : public ov::pass::MatcherPass 
{ public: - OPENVINO_RTTI("ConvertGRUSequenceToTensorIterator", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGRUSequenceToTensorIterator"); ConvertGRUSequenceToTensorIterator(); }; @@ -53,7 +53,7 @@ class ov::pass::ConvertGRUSequenceToTensorIterator : public ov::pass::MatcherPas class ov::pass::ConvertLSTMSequenceToTensorIterator : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertLSTMSequenceToTensorIterator", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertLSTMSequenceToTensorIterator"); ConvertLSTMSequenceToTensorIterator(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp index 0aceb9e99614fb..831ba981cb16d6 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertShapeOf3; class ov::pass::ConvertShapeOf3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertShapeOf3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertShapeOf3"); ConvertShapeOf3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp index 05b2d2607464b8..ac03068aa78298 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertShuffleChannels3; class ov::pass::ConvertShuffleChannels3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertShuffleChannels3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertShuffleChannels3"); ConvertShuffleChannels3(); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp b/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp index 8d396bca6ccd1e..b32c277b1a2b23 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API SliceToStridedSlice; */ class ov::pass::SliceToStridedSlice : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SliceToStridedSlice", "0"); + OPENVINO_MATCHER_PASS_RTTI("SliceToStridedSlice"); SliceToStridedSlice(bool use_shapes); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp b/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp index 020b4e236fcac5..58dd6dbc39ac49 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertSliceScatter; class ov::pass::ConvertSliceScatter : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSliceScatter", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSliceScatter"); ConvertSliceScatter(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp index 701f2cb94e9857..b8cd2907f82cdd 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertSoftMax8ToSoftMax1; */ class 
ov::pass::ConvertSoftMax8ToSoftMax1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSoftMax8ToSoftMax1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSoftMax8ToSoftMax1"); ConvertSoftMax8ToSoftMax1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp index 2164eac6052384..a7a0ef5b01aee1 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertSoftMax1ToSoftMax8; class ov::pass::ConvertSoftMax1ToSoftMax8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSoftMax1ToSoftMax8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSoftMax1ToSoftMax8"); ConvertSoftMax1ToSoftMax8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp b/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp index 97bda0273c522f..235a56b728876a 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp @@ -33,7 +33,7 @@ class TRANSFORMATIONS_API ConvertSpaceToBatch; class ov::pass::ConvertSpaceToBatch : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSpaceToBatch", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSpaceToBatch"); explicit ConvertSpaceToBatch(bool convert_by_elements = true) : MatcherPass() { if (convert_by_elements) convert_space_to_batch_by_elements(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp b/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp index 
da97add26411a8..6edf57f4c254fe 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertSpaceToDepth; class ov::pass::ConvertSpaceToDepth : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSpaceToDepth", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSpaceToDepth"); ConvertSpaceToDepth(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp index c2ebfbc0f3138b..d35858ce10b3f4 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertSqueeze15ToSqueeze0 : public MatcherPass { public: - OPENVINO_RTTI("ConvertSqueeze15ToSqueeze0", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSqueeze15ToSqueeze0"); ConvertSqueeze15ToSqueeze0(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp b/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp index 5b2a5d0c36abdf..c6baf673efe95c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp @@ -21,12 +21,12 @@ class TRANSFORMATIONS_API ConvertSubtractWithConstant; class ov::pass::ConvertSubtract : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSubtract", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSubtract"); ConvertSubtract(); }; class ov::pass::ConvertSubtractWithConstant : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("ConvertSubtractWithConstant", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSubtractWithConstant"); ConvertSubtractWithConstant(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp index e729d735c652d1..fb53cc81743ec4 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp @@ -36,7 +36,7 @@ class TRANSFORMATIONS_API FuseLSTMSequencesToBidirectionalLSTMSequence; class ov::pass::ConvertTensorIteratorToLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTensorIteratorToLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTensorIteratorToLSTMSequence"); ConvertTensorIteratorToLSTMSequence(); }; @@ -48,7 +48,7 @@ class ov::pass::ConvertTensorIteratorToLSTMSequence : public ov::pass::MatcherPa class ov::pass::ConvertTensorIteratorToRNNSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTensorIteratorToRNNSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTensorIteratorToRNNSequence"); ConvertTensorIteratorToRNNSequence(); }; @@ -60,7 +60,7 @@ class ov::pass::ConvertTensorIteratorToRNNSequence : public ov::pass::MatcherPas class ov::pass::ConvertTensorIteratorToGRUSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTensorIteratorToGRUSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTensorIteratorToGRUSequence"); ConvertTensorIteratorToGRUSequence(); }; @@ -72,13 +72,13 @@ class ov::pass::ConvertTensorIteratorToSequence : public GraphRewrite { class ov::pass::ConvertLoopWithSlicedInputConcatOutputToLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertLoopWithSlicedInputConcatOutputToLSTMSequence", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("ConvertLoopWithSlicedInputConcatOutputToLSTMSequence"); ConvertLoopWithSlicedInputConcatOutputToLSTMSequence(); }; class ov::pass::ConvertLoopWithScatterUpdateToLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertLoopWithScatterUpdateToLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertLoopWithScatterUpdateToLSTMSequence"); ConvertLoopWithScatterUpdateToLSTMSequence(); }; @@ -101,7 +101,7 @@ class ov::pass::ConvertLoopToLSTMSequence : public ov::pass::GraphRewrite { */ class ov::pass::FuseReverseLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseReverseLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("FuseReverseLSTMSequence"); FuseReverseLSTMSequence(); }; @@ -111,6 +111,6 @@ class ov::pass::FuseReverseLSTMSequence : public ov::pass::MatcherPass { */ class ov::pass::FuseLSTMSequencesToBidirectionalLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseLSTMSequencesToBidirectionalLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("FuseLSTMSequencesToBidirectionalLSTMSequence"); FuseLSTMSequencesToBidirectionalLSTMSequence(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp index e6e8340e45df94..fd5be9a10c10ef 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertTopK11ToTopK3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertTopK11ToTopK3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTopK11ToTopK3"); ConvertTopK11ToTopK3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp 
b/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp index 0b61bf85bff28f..0f3f6ea160f825 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertTopK3; class ov::pass::ConvertTopK3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTopK3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTopK3"); ConvertTopK3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp b/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp index ee7bf0f55615bd..0940f5f1a67b51 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertXorToLogicalXor; */ class ov::pass::ConvertXorToLogicalXor : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertXorToLogicalXor", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertXorToLogicalXor"); ConvertXorToLogicalXor(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp index 8f4e6ad237a6b0..b730f78b2291e8 100644 --- a/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertDetectionOutput8ToDetectionOutput1; */ class ov::pass::ConvertDetectionOutput8ToDetectionOutput1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDetectionOutput8ToDetectionOutput1", "0"); 
+ OPENVINO_MATCHER_PASS_RTTI("ConvertDetectionOutput8ToDetectionOutput1"); ConvertDetectionOutput8ToDetectionOutput1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp index 8693fb206ed2cd..de92f382af92b3 100644 --- a/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertDetectionOutput1ToDetectionOutput8; */ class ov::pass::ConvertDetectionOutput1ToDetectionOutput8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDetectionOutput1ToDetectionOutput8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDetectionOutput1ToDetectionOutput8"); ConvertDetectionOutput1ToDetectionOutput8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp index 78accf3f0b4877..e67367a2e93bab 100644 --- a/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API EinsumDecomposition; */ class ov::pass::EinsumDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EinsumDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("EinsumDecomposition"); EinsumDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp index 15c53bc9cf6f30..29913d429b8462 100644 --- a/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API EyeDecomposition; */ class ov::pass::EyeDecomposition : public MatcherPass { public: - OPENVINO_RTTI("EyeDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("EyeDecomposition"); EyeDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp index d938a9b70687e2..d099c268d2d7b0 100644 --- a/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp @@ -44,6 +44,6 @@ class TRANSFORMATIONS_API FakeQuantizeDecomposition; class ov::pass::FakeQuantizeDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FakeQuantizeDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("FakeQuantizeDecomposition"); FakeQuantizeDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp index d08c3877de26a1..be26ea4c14625c 100644 --- a/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API Gelu7Downgrade; */ class ov::pass::Gelu7Downgrade : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("Gelu7Downgrade", "0"); + OPENVINO_MATCHER_PASS_RTTI("Gelu7Downgrade"); Gelu7Downgrade(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp index 8df9c220a9de75..47c908eb8ac6f5 100644 --- 
a/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp @@ -18,6 +18,6 @@ class TRANSFORMATIONS_API GroupNormalizationDecomposition; // This transformation expresses GroupNormalization with a sub-graph of OpenVINO operations class ov::pass::GroupNormalizationDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GroupNormalizationDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("GroupNormalizationDecomposition"); GroupNormalizationDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp index ce4387293f97ad..1060d266b7fd0a 100644 --- a/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp @@ -36,6 +36,6 @@ class TRANSFORMATIONS_API GRUCellDecomposition; class ov::pass::GRUCellDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GRUCellDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("GRUCellDecomposition"); GRUCellDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp index b635a08350922b..b7636aeb5d6d68 100644 --- a/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API HardSigmoidDecomposition; */ class ov::pass::HardSigmoidDecomposition : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("HardSigmoidDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("HardSigmoidDecomposition"); HardSigmoidDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp index 5a455c37253afe..e597bc75f8600d 100644 --- a/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API HSigmoidDecomposition; */ class ov::pass::HSigmoidDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidDecomposition"); HSigmoidDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp index e6168109d3c89b..8d05edc3afa650 100644 --- a/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API HSwishDecomposition; */ class ov::pass::HSwishDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSwishDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSwishDecomposition"); HSwishDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp index f972a0cdd8fa76..84e444c12fccec 100644 --- a/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API LogSoftmaxDecomposition; */ class ov::pass::LogSoftmaxDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LogSoftmaxDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("LogSoftmaxDecomposition"); LogSoftmaxDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp index 08b109533738cf..94077e21b472cb 100644 --- a/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp @@ -37,6 +37,6 @@ class TRANSFORMATIONS_API LSTMCellDecomposition; class ov::pass::LSTMCellDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LSTMCellDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("LSTMCellDecomposition"); LSTMCellDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp index f8335f5f4546f2..abacd721fa23a7 100644 --- a/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API MVN6Decomposition; */ class ov::pass::MVN6Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MVN6Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("MVN6Decomposition"); MVN6Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp 
b/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp index ae058a3e8cba2f..18f071f8f6fb03 100644 --- a/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API NormalizeL2Decomposition; */ class ov::pass::NormalizeL2Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NormalizeL2Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("NormalizeL2Decomposition"); NormalizeL2Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp index 506853e003e33a..1d8cc22089a93d 100644 --- a/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API ReduceL1Decomposition; */ class ov::pass::ReduceL1Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReduceL1Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReduceL1Decomposition"); ReduceL1Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp index dab02ff58f2f5c..8bf9955d523593 100644 --- a/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API ReduceL2Decomposition; */ class ov::pass::ReduceL2Decomposition : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("ReduceL2Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReduceL2Decomposition"); ReduceL2Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp index ce651a47510577..475f2bf1cdf73e 100644 --- a/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp @@ -31,6 +31,6 @@ class TRANSFORMATIONS_API RNNCellDecomposition; class ov::pass::RNNCellDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RNNCellDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("RNNCellDecomposition"); RNNCellDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp index e52c6ba46838b9..f86175c617c0fc 100644 --- a/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp @@ -18,7 +18,7 @@ class TRANSFORMATIONS_API ScaledDotProductAttentionDecomposition; class ov::pass::ScaledDotProductAttentionDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ScaledDotProductAttentionDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("ScaledDotProductAttentionDecomposition"); ScaledDotProductAttentionDecomposition(); std::shared_ptr decompose(std::shared_ptr node); }; diff --git a/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp 
b/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp index 70cd9bbe0162c5..e900f3c3d213f7 100644 --- a/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp +++ b/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp @@ -37,6 +37,6 @@ class TRANSFORMATIONS_API SimplifyCTCGreedyDecoderSeqLen; */ class ov::pass::SimplifyCTCGreedyDecoderSeqLen : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SimplifyCTCGreedyDecoderSeqLen", "0"); + OPENVINO_MATCHER_PASS_RTTI("SimplifyCTCGreedyDecoderSeqLen"); SimplifyCTCGreedyDecoderSeqLen(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp index a1ab4be69fdc62..0e13fd516b13e7 100644 --- a/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp @@ -69,6 +69,6 @@ class TRANSFORMATIONS_API SoftmaxDecomposition; class ov::pass::SoftmaxDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftmaxDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftmaxDecomposition"); SoftmaxDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp index 3a7cca9ff5c8b1..ef52b8ab922d3b 100644 --- a/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API SoftPlusDecomposition; */ class ov::pass::SoftPlusDecomposition : public ov::pass::MatcherPass { 
public: - OPENVINO_RTTI("SoftPlusDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftPlusDecomposition"); SoftPlusDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp index bfd026f21eb111..300761470b73ce 100644 --- a/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp @@ -40,6 +40,6 @@ class TRANSFORMATIONS_API SoftSignDecomposition; class ov::pass::SoftSignDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftSignDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftSignDecomposition"); SoftSignDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp index 02f889e7d3122a..6169a0a512e4f3 100644 --- a/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API UniqueDecomposition; // This transformation expresses Unique with a sub-graph of OpenVINO operations class ov::pass::UniqueDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UniqueDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("UniqueDecomposition"); UniqueDecomposition(); }; From cbecdad53d50a3e9db14f9833b9eaecb378d0911 Mon Sep 17 00:00:00 2001 From: Mingyu Kim Date: Fri, 20 Dec 2024 15:19:31 +0900 Subject: [PATCH 35/60] [GPU] minor comment (#28086) ### Details: - Follow-up from #28077 for an additional comment --- src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp | 1 + 1 file changed, 1 insertion(+) diff --git 
a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 44d68740a0dfb7..53ab9aa188b7aa 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -996,6 +996,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { } // AZP does not support 8bit weight + // XXX: This is currently wrapped as GPU_DEBUG_IF as dynamic_quantize_asym is not exposed through public API. GPU_DEBUG_IF(debug_config->dynamic_quantize_asym && (root->get_input_element_type(1) == ov::element::i8 || root->get_input_element_type(1) == ov::element::u8)) { GPU_DEBUG_TRACE << root->get_friendly_name() << " dyn_quan is turned off: asym quantization does not support 8bit weight" << std::endl; From 375db365725f1cdef0af96c0d3b1b011ddc05830 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Fri, 20 Dec 2024 10:34:30 +0400 Subject: [PATCH 36/60] [ONNX] Removed redundant memory copying for small data types when MMAP is using (#28120) ### Details: - Removed a redundant memory copying for data types less than 4 bytes when MMAP is using - Implementation verified by a previously added tests named onnx_external_data_* ### Tickets: - 159161 --- .../onnx/frontend/src/core/tensor.cpp | 96 +++++++++++++ .../onnx/frontend/src/core/tensor.hpp | 136 ++++-------------- .../src/utils/tensor_external_data.hpp | 8 ++ 3 files changed, 128 insertions(+), 112 deletions(-) diff --git a/src/frontends/onnx/frontend/src/core/tensor.cpp b/src/frontends/onnx/frontend/src/core/tensor.cpp index b23f6c55253ac1..1c3a943e6481d1 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.cpp +++ b/src/frontends/onnx/frontend/src/core/tensor.cpp @@ -266,6 +266,102 @@ std::vector Tensor::get_data() const { ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "STRING"); } +std::shared_ptr Tensor::get_ov_constant() const { + if (m_tensor_proto->has_segment()) { + 
FRONT_END_THROW("Loading segments isn't supported"); + } + std::shared_ptr constant{nullptr}; + ov::element::Type ov_type = get_ov_type(); + size_t element_count = get_data_size(); + if (ov::element::is_nibble_type(ov_type)) { + element_count *= 2; // Each byte contains 2 data items + } + if (has_external_data()) { + const auto ext_data = detail::TensorExternalData(*m_tensor_proto); + if (m_mmap_cache) { + constant = + std::make_shared(ov_type, + m_shape, + ext_data.load_external_mmap_data(m_model_dir, m_mmap_cache)); + } else { + constant = + std::make_shared(ov_type, m_shape, ext_data.load_external_data(m_model_dir)); + } + // ext_data.size() might be zero, need to recalc by using info about actually read data (for byte-size) + element_count = constant->get_byte_size() / ov_type.size(); + if (ov::element::is_nibble_type(ov_type)) { + element_count *= 2; // Each byte contains 2 data items, so byte size must be multiplied + } + if (element_count != ov::shape_size(m_shape) || + (ext_data.size() != 0 && constant->get_byte_size() != ext_data.size())) { + throw error::invalid_external_data( + "The size of the external data file does not match the byte size of an initializer '" + get_name() + + "' in the model"); + } + } else if (element_count == shape_size(m_shape)) { + switch (m_tensor_proto->data_type()) { + case TensorProto_DataType::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_UINT32: + case TensorProto_DataType::TensorProto_DataType_UINT64: + constant = std::make_shared(ov_type, m_shape, get_data_ptr()); + break; + case TensorProto_DataType::TensorProto_DataType_INT4: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_INT8: + constant = std::make_shared(ov_type, m_shape,
get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_INT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_UINT4: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_UINT8: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_UINT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_BOOL: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_FLOAT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_STRING: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + default: + ONNX_UNSUPPORTED_DATA_TYPE( + m_tensor_proto->data_type(), + "BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, " + "UINT4, UINT8, UINT16, UINT32, UINT64, STRING"); + } + } else if (element_count == 0 && m_shape.size() == 0) { + constant = common::make_failsafe_constant(ov_type); + } else { + FRONT_END_THROW("Tensor shape doesn't match data size"); + } + + if (m_tensor_proto->has_name()) { + constant->set_friendly_name(get_name()); + } + return constant; +} + } // namespace onnx } // namespace frontend } // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/core/tensor.hpp b/src/frontends/onnx/frontend/src/core/tensor.hpp index a63cdfd1906bb0..7321311e4b4775 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.hpp +++ b/src/frontends/onnx/frontend/src/core/tensor.hpp @@ -186,119 +186,9 @@ class Tensor { return static_cast(m_tensor_proto->data_type()); } - std::shared_ptr get_ov_constant() const { - if (m_tensor_proto->has_segment()) { - FRONT_END_THROW("Loading segments isn't supported"); - } - switch (m_tensor_proto->data_type()) { - case TensorProto_DataType::TensorProto_DataType_BOOL: - return make_ov_constant(ov::element::boolean); - case TensorProto_DataType::TensorProto_DataType_FLOAT: - return make_ov_constant(ov::element::f32); - case TensorProto_DataType::TensorProto_DataType_FLOAT16: - return make_ov_constant(ov::element::f16); - case TensorProto_DataType::TensorProto_DataType_DOUBLE: - return make_ov_constant(ov::element::f64); - case TensorProto_DataType::TensorProto_DataType_INT4: - return make_ov_constant(ov::element::i4); - case TensorProto_DataType::TensorProto_DataType_INT8: - return make_ov_constant(ov::element::i8); - case TensorProto_DataType::TensorProto_DataType_INT16: - return make_ov_constant(ov::element::i16); - case TensorProto_DataType::TensorProto_DataType_INT32: - return make_ov_constant(ov::element::i32); - case TensorProto_DataType::TensorProto_DataType_INT64: - return make_ov_constant(ov::element::i64); - case TensorProto_DataType::TensorProto_DataType_UINT4: - return make_ov_constant(ov::element::u4); - case TensorProto_DataType::TensorProto_DataType_UINT8: - return make_ov_constant(ov::element::u8); - case TensorProto_DataType::TensorProto_DataType_UINT16: - return make_ov_constant(ov::element::u16); - case TensorProto_DataType::TensorProto_DataType_UINT32: - return make_ov_constant(ov::element::u32); - case TensorProto_DataType::TensorProto_DataType_UINT64: - return make_ov_constant(ov::element::u64); - case 
TensorProto_DataType::TensorProto_DataType_BFLOAT16: - return make_ov_constant(ov::element::bf16); - case TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN: - return make_ov_constant(ov::element::f8e4m3); - case TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2: - return make_ov_constant(ov::element::f8e5m2); - case TensorProto_DataType::TensorProto_DataType_STRING: - return make_ov_constant(ov::element::string); - default: - ONNX_UNSUPPORTED_DATA_TYPE( - m_tensor_proto->data_type(), - "BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, " - "UINT4, UINT8, UINT16, UINT32, UINT64, STRING"); - } - } + std::shared_ptr get_ov_constant() const; private: - template ::value || std::is_same::value || - std::is_same::value || std::is_same::value || - std::is_same::value, - bool>::type = true> - std::shared_ptr make_ov_constant(const ov::element::Type& type) const { - std::shared_ptr constant{nullptr}; - size_t data_size = get_data_size(); - if (has_external_data()) { - const auto ext_data = detail::TensorExternalData(*m_tensor_proto); - if (m_mmap_cache) { - constant = - std::make_shared(type, - m_shape, - ext_data.load_external_mmap_data(m_model_dir, m_mmap_cache)); - } else { - constant = - std::make_shared(type, m_shape, ext_data.load_external_data(m_model_dir)); - } - if (constant->get_byte_size() != ov::shape_size(m_shape) * type.size()) { - throw error::invalid_external_data( - "The size of the external data file does not match the byte size of an initializer '" + get_name() + - "' in the model"); - } - } else if (data_size == shape_size(m_shape)) { - constant = std::make_shared(type, m_shape, get_data_ptr()); - } else if (data_size == 0 && m_shape.size() == 0) { - constant = common::make_failsafe_constant(type); - } else { - FRONT_END_THROW("Tensor shape doesn't match data size"); - } - - if (m_tensor_proto->has_name()) { - constant->set_friendly_name(get_name()); - } - return constant; - } - - template ::value 
&& !std::is_same::value && - !std::is_same::value && !std::is_same::value && - !std::is_same::value, - bool>::type = true> - std::shared_ptr make_ov_constant(const ov::element::Type& type) const { - std::shared_ptr constant{nullptr}; - auto data = get_data(); - auto element_count = data.size(); - if (ov::element::is_nibble_type(type)) { - element_count *= 2; // Each byte contains 2 data items - } - if (element_count == shape_size(m_shape)) { - constant = std::make_shared(type, m_shape, data.data()); - } else if (element_count == 0 && m_shape.size() == 0) { - constant = common::make_failsafe_constant(type); - } else { - FRONT_END_THROW("Tensor shape doesn't match data size"); - } - if (m_tensor_proto->has_name()) { - constant->set_friendly_name(get_name()); - } - return constant; - } - bool has_external_data() const { return m_tensor_proto->has_data_location() && m_tensor_proto->data_location() == TensorProto_DataLocation::TensorProto_DataLocation_EXTERNAL; @@ -317,6 +207,9 @@ class Tensor { } const void* get_data_ptr() const { + if (has_external_data()) { + FRONT_END_THROW("Unexpected usage of method for externally stored data"); + } if (m_tensor_proto->has_raw_data()) { return m_tensor_proto->raw_data().data(); } @@ -336,6 +229,10 @@ class Tensor { } size_t get_data_size() const { + if (has_external_data()) { + const auto ext_data = detail::TensorExternalData(*m_tensor_proto); + return ext_data.size() / get_onnx_data_size(m_tensor_proto->data_type()); + } if (m_tensor_proto->has_raw_data()) { return m_tensor_proto->raw_data().size() / get_onnx_data_size(m_tensor_proto->data_type()); } @@ -352,8 +249,23 @@ class Tensor { return m_tensor_proto->double_data_size(); case TensorProto_DataType::TensorProto_DataType_STRING: return m_tensor_proto->string_data_size(); + case TensorProto_DataType::TensorProto_DataType_INT4: + case TensorProto_DataType::TensorProto_DataType_INT8: + case TensorProto_DataType::TensorProto_DataType_INT16: + case 
TensorProto_DataType::TensorProto_DataType_UINT4: + case TensorProto_DataType::TensorProto_DataType_UINT8: + case TensorProto_DataType::TensorProto_DataType_UINT16: + case TensorProto_DataType::TensorProto_DataType_BOOL: + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN: + case TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2: + return m_tensor_proto->int32_data_size(); } - ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, INT32, INT64, UINT64, DOUBLE, STRING"); + ONNX_INVALID_DATA_TYPE( + m_tensor_proto->data_type(), + "BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, " + "UINT4, UINT8, UINT16, UINT32, UINT64, STRING"); } const TensorProto* m_tensor_proto; diff --git a/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp b/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp index 983e53895c1148..e715a8e7e61cdc 100644 --- a/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp +++ b/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp @@ -46,6 +46,14 @@ class TensorExternalData { /// \return State of TensorExternalData as string representation std::string to_string() const; + /// \brief Object contains a data length after construction. Method allows read-only access to this + /// information. 
+ /// + /// \return Returns a stored data size in bytes + uint64_t size() const { + return m_data_length; + } + private: std::string m_data_location{}; uint64_t m_offset = 0; From ff8146ef305fe07a5c0dc0a30006d96112af600c Mon Sep 17 00:00:00 2001 From: Alicja Miloszewska Date: Fri, 20 Dec 2024 10:05:45 +0100 Subject: [PATCH 37/60] [Py OV] Extend Model to utilize with-expressions (#27191) ### Details: - Implement `__enter__` and `__exit__` to create class-based context manager - Remove inheritance from ModelBase in ie_api.Model and make it an attribute. - add private property __model that stores `_pyopenvino.Model`. In `_pyopenvino` such attribute can be accessed as `_Model__model` because of name mangling - update pyAPI methods that have `std::shared_ptr` in their signatures. - add `test_model_with_statement` and `test_model_tempdir_fails` ### Motivation: On Windows reading `ov.Model` from temporary directory leads to `PermissionError`: ```python mem_model = generate_model_with_memory(input_shape=Shape([2, 1]), data_type=Type.f32) with tempfile.TemporaryDirectory() as model_save_dir: save_model(mem_model, f"{model_save_dir}/model.xml") model = Core().read_model(f"{model_save_dir}/model.xml") ``` ### Tickets: - CVS-106987 --------- Signed-off-by: Alicja Miloszewska --- src/bindings/python/src/openvino/_ov_api.py | 53 +++++++++++++++---- .../src/openvino/properties/_properties.py | 3 ++ .../src/openvino/test_utils/__init__.py | 2 +- .../src/openvino/test_utils/test_api.py | 10 ++++ .../core/offline_transformations.cpp | 37 ++++++++----- .../src/pyopenvino/frontend/frontend.cpp | 22 +++++--- .../pyopenvino/graph/attribute_visitor.cpp | 5 ++ .../python/src/pyopenvino/graph/model.cpp | 4 -- .../python/src/pyopenvino/graph/ops/if.cpp | 39 +++++++++----- .../python/src/pyopenvino/graph/ops/loop.cpp | 4 +- .../pyopenvino/graph/ops/tensor_iterator.cpp | 12 ++++- .../src/pyopenvino/graph/passes/manager.cpp | 12 +++-- .../graph/preprocess/pre_post_process.cpp | 20 ++++++- 
.../python/src/pyopenvino/pyopenvino.cpp | 34 +++++++----- .../src/pyopenvino/test_utils/CMakeLists.txt | 6 +-- .../python/src/pyopenvino/utils/utils.cpp | 12 +++++ .../python/src/pyopenvino/utils/utils.hpp | 2 + .../python/tests/test_runtime/test_model.py | 44 +++++++++++++-- 18 files changed, 247 insertions(+), 74 deletions(-) create mode 100644 src/bindings/python/src/openvino/test_utils/test_api.py diff --git a/src/bindings/python/src/openvino/_ov_api.py b/src/bindings/python/src/openvino/_ov_api.py index 972ab4a9eb81c0..da31fab4c95d8e 100644 --- a/src/bindings/python/src/openvino/_ov_api.py +++ b/src/bindings/python/src/openvino/_ov_api.py @@ -2,7 +2,8 @@ # Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from typing import Any, Iterable, Union, Optional, Dict +from types import TracebackType +from typing import Any, Iterable, Union, Optional, Dict, Type from pathlib import Path @@ -21,22 +22,30 @@ ) -class Model(ModelBase): +class Model: def __init__(self, *args: Any, **kwargs: Any) -> None: if args and not kwargs: if isinstance(args[0], ModelBase): - super().__init__(args[0]) + self.__model = ModelBase(args[0]) elif isinstance(args[0], Node): - super().__init__(*args) + self.__model = ModelBase(*args) else: - super().__init__(*args) + self.__model = ModelBase(*args) if args and kwargs: - super().__init__(*args, **kwargs) + self.__model = ModelBase(*args, **kwargs) if kwargs and not args: - super().__init__(**kwargs) + self.__model = ModelBase(**kwargs) + + def __getattr__(self, name: str) -> Any: + if self.__model is None: + raise AttributeError(f"'Model' object has no attribute '{name}' or attribute is no longer accessible.") + return getattr(self.__model, name) def clone(self) -> "Model": - return Model(super().clone()) + return Model(self.__model.clone()) + + def __copy__(self) -> "Model": + raise TypeError("Cannot copy 'openvino.runtime.Model'. 
Please, use deepcopy instead.") def __deepcopy__(self, memo: Dict) -> "Model": """Returns a deepcopy of Model. @@ -44,7 +53,17 @@ def __deepcopy__(self, memo: Dict) -> "Model": :return: A copy of Model. :rtype: openvino.runtime.Model """ - return Model(super().clone()) + return Model(self.__model.clone()) + + def __enter__(self) -> "Model": + return self + + def __exit__(self, exc_type: Type[BaseException], exc_value: BaseException, traceback: TracebackType) -> None: + del self.__model + self.__model = None + + def __repr__(self) -> str: + return self.__model.__repr__() class InferRequest(_InferRequestWrapper): @@ -500,6 +519,8 @@ def read_model( config: Optional[dict] = None ) -> Model: config = {} if config is None else config + if isinstance(model, Model): + model = model._Model__model if isinstance(weights, Tensor): return Model(super().read_model(model, weights)) @@ -543,6 +564,8 @@ def compile_model( :return: A compiled model. :rtype: openvino.runtime.CompiledModel """ + if isinstance(model, Model): + model = model._Model__model if weights is None: if device_name is None: return CompiledModel( @@ -562,6 +585,16 @@ def compile_model( weights=weights, ) + def query_model( + self, + model: Model, + device_name: str, + config: Optional[dict] = None, + ) -> dict: + return super().query_model(model._Model__model, + device_name, + {} if config is None else config, ) + def import_model( self, model_stream: bytes, @@ -637,4 +670,6 @@ def compile_model( """ core = Core() + if isinstance(model, Model): + model = model._Model__model return core.compile_model(model, device_name, {} if config is None else config) diff --git a/src/bindings/python/src/openvino/properties/_properties.py b/src/bindings/python/src/openvino/properties/_properties.py index a3d9e2076ad072..ee0a612583431c 100644 --- a/src/bindings/python/src/openvino/properties/_properties.py +++ b/src/bindings/python/src/openvino/properties/_properties.py @@ -16,6 +16,9 @@ def __new__(cls, prop: Callable[..., 
Any]): # type: ignore def __call__(self, *args: Any) -> Callable[..., Any]: if args is not None: + from openvino import Model + if args and isinstance(args[0], Model): + return self.prop(args[0]._Model__model) return self.prop(*args) return self.prop() diff --git a/src/bindings/python/src/openvino/test_utils/__init__.py b/src/bindings/python/src/openvino/test_utils/__init__.py index e25fa9e67be800..bca79f8a4e2729 100644 --- a/src/bindings/python/src/openvino/test_utils/__init__.py +++ b/src/bindings/python/src/openvino/test_utils/__init__.py @@ -2,4 +2,4 @@ # Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from .test_utils_api import compare_functions +from .test_api import compare_functions diff --git a/src/bindings/python/src/openvino/test_utils/test_api.py b/src/bindings/python/src/openvino/test_utils/test_api.py new file mode 100644 index 00000000000000..ce65eb9dcd820e --- /dev/null +++ b/src/bindings/python/src/openvino/test_utils/test_api.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .test_utils_api import compare_functions as compare_functions_base +from openvino.runtime import Model + + +def compare_functions(lhs: Model, rhs: Model, compare_tensor_names: bool = True) -> tuple: + return compare_functions_base(lhs._Model__model, rhs._Model__model, compare_tensor_names) diff --git a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp index 641893cdd267a2..90aece1803f4b4 100644 --- a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp +++ b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp @@ -23,6 +23,7 @@ #include "openvino/pass/low_latency.hpp" #include "openvino/pass/manager.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -34,7 +35,8 @@ void regmodule_offline_transformations(py::module m) { 
m_offline_transformations.def( "apply_moc_transformations", - [](std::shared_ptr model, bool cf, bool smart_reshape) { + [](py::object& ie_api_model, bool cf, bool smart_reshape) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; if (smart_reshape) manager.register_pass(); @@ -48,7 +50,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_moc_legacy_transformations", - [](std::shared_ptr model, const std::vector& params_with_custom_types) { + [](py::object& ie_api_model, const std::vector& params_with_custom_types) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(params_with_custom_types); manager.run_passes(model); @@ -58,7 +61,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_low_latency_transformation", - [](std::shared_ptr model, bool use_const_initializer = true) { + [](py::object& ie_api_model, bool use_const_initializer = true) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(use_const_initializer); manager.run_passes(model); @@ -68,7 +72,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_pruning_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -77,7 +82,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_make_stateful_transformation", - [](std::shared_ptr model, const std::map& param_res_names) { + [](py::object& ie_api_model, const std::map& param_res_names) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(param_res_names); 
manager.run_passes(model); @@ -87,7 +93,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_make_stateful_transformation", - [](std::shared_ptr model, const ov::pass::MakeStateful::ParamResPairs& pairs_to_replace) { + [](py::object& ie_api_model, const ov::pass::MakeStateful::ParamResPairs& pairs_to_replace) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(pairs_to_replace); manager.run_passes(model); @@ -97,7 +104,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "compress_model_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); bool postponed = false; return ov::pass::compress_model_to_f16(model, postponed); }, @@ -105,7 +113,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "compress_quantize_weights_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -114,7 +123,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "convert_sequence_to_tensor_iterator_transformation", - [](std::shared_ptr model) { + [](py::object ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -123,7 +133,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_fused_names_cleanup", - [](std::shared_ptr model) { + [](py::object ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -132,7 +143,8 @@ void 
regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "paged_attention_transformation", - [](std::shared_ptr model, bool use_block_indices_inputs, bool use_score_outputs) { + [](py::object& ie_api_model, bool use_block_indices_inputs, bool use_score_outputs) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(use_block_indices_inputs, use_score_outputs); manager.run_passes(model); @@ -143,7 +155,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "stateful_to_stateless_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp index 758fb505f5f885..52707b0b8248ce 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp @@ -113,10 +113,13 @@ void regclass_frontend_FrontEnd(py::module m) { :rtype: openvino.runtime.Model )"); - fem.def("convert", - static_cast&) const>(&FrontEnd::convert), - py::arg("model"), - R"( + fem.def( + "convert", + [](FrontEnd& self, const py::object& ie_api_model) { + return self.convert(Common::utils::convert_to_model(ie_api_model)); + }, + py::arg("model"), + R"( Completely convert the remaining, not converted part of a function. :param model: Partially converted OpenVINO model. 
@@ -153,10 +156,13 @@ void regclass_frontend_FrontEnd(py::module m) { :rtype: openvino.runtime.Model )"); - fem.def("normalize", - &FrontEnd::normalize, - py::arg("model"), - R"( + fem.def( + "normalize", + [](FrontEnd& self, const py::object& ie_api_model) { + self.normalize(Common::utils::convert_to_model(ie_api_model)); + }, + py::arg("model"), + R"( Runs normalization passes on function that was loaded with partial conversion. :param model : Partially converted OpenVINO model. diff --git a/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp b/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp index 587d3906b02607..40a603977159a5 100644 --- a/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp +++ b/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp @@ -35,6 +35,7 @@ void regclass_graph_AttributeVisitor(py::module m) { "on_attributes", [](ov::AttributeVisitor* self, py::dict& attributes) { py::object float_32_type = py::module_::import("numpy").attr("float32"); + py::object model = py::module_::import("openvino.runtime").attr("Model"); for (const auto& attribute : attributes) { if (py::isinstance(attribute.second)) { visit_attribute(attributes, attribute, self); @@ -48,6 +49,10 @@ void regclass_graph_AttributeVisitor(py::module m) { visit_attribute(attributes, attribute, self); } else if (py::isinstance(attribute.second)) { visit_attribute>(attributes, attribute, self); + } else if (py::isinstance(attribute.second, model)) { + auto attr_casted = attribute.second.attr("_Model__model").cast>(); + self->on_attribute>(attribute.first.cast(), attr_casted); + attributes[attribute.first] = std::move(attr_casted); } else if (py::isinstance(attribute.second)) { visit_attribute(attributes, attribute, self); } else if (py::isinstance(attribute.second)) { diff --git a/src/bindings/python/src/pyopenvino/graph/model.cpp b/src/bindings/python/src/pyopenvino/graph/model.cpp index e3c648c0f4cfcb..a482ba55e46e74 100644 --- 
a/src/bindings/python/src/pyopenvino/graph/model.cpp +++ b/src/bindings/python/src/pyopenvino/graph/model.cpp @@ -1328,10 +1328,6 @@ void regclass_graph_Model(py::module m) { outputs_str + "\n]>"; }); - model.def("__copy__", [](ov::Model& self) { - throw py::type_error("Cannot copy 'openvino.runtime.Model. Please, use deepcopy instead."); - }); - model.def("get_rt_info", (PyRTMap & (ov::Model::*)()) & ov::Model::get_rt_info, py::return_value_policy::reference_internal, diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp index c452e2fe4ac849..8cd52099436d2b 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp @@ -12,6 +12,7 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/ops/if.hpp" #include "pyopenvino/graph/ops/util/multisubgraph.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -77,10 +78,14 @@ void regclass_graph_op_If(py::module m) { :rtype: openvino.Model )"); - cls.def("set_then_body", - &ov::op::v8::If::set_then_body, - py::arg("body"), - R"( + cls.def( + "set_then_body", + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto body = Common::utils::convert_to_model(ie_api_model); + return self->set_then_body(body); + }, + py::arg("body"), + R"( Sets new Model object as new then_body. :param body: new body for 'then' branch. @@ -89,10 +94,14 @@ void regclass_graph_op_If(py::module m) { :rtype: None )"); - cls.def("set_else_body", - &ov::op::v8::If::set_else_body, - py::arg("body"), - R"( + cls.def( + "set_else_body", + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto body = Common::utils::convert_to_model(ie_api_model); + return self->set_else_body(body); + }, + py::arg("body"), + R"( Sets new Model object as new else_body. :param body: new body for 'else' branch. 
@@ -156,11 +165,15 @@ void regclass_graph_op_If(py::module m) { :rtype: openvino.Model )"); - cls.def("set_function", - &ov::op::util::MultiSubGraphOp::set_function, - py::arg("index"), - py::arg("func"), - R"( + cls.def( + "set_function", + [](const std::shared_ptr& self, int index, const py::object& ie_api_model) { + const auto func = Common::utils::convert_to_model(ie_api_model); + self->set_function(index, func); + }, + py::arg("index"), + py::arg("func"), + R"( Adds sub-graph to MultiSubGraphOp. :param index: index of new sub-graph. diff --git a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp index 536d97d17273ab..069a1376eba758 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp @@ -11,6 +11,7 @@ #include "openvino/util/log.hpp" #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/ops/util/multisubgraph.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -91,7 +92,8 @@ void regclass_graph_op_Loop(py::module m) { cls.def( "set_function", - [](const std::shared_ptr& self, const std::shared_ptr& func) { + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto func = Common::utils::convert_to_model(ie_api_model); self->set_function(func); }, py::arg("func")); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp index 5932656c3eccb9..3039aa90008f29 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp @@ -9,6 +9,7 @@ #include "openvino/op/util/sub_graph_base.hpp" #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/ops/util/multisubgraph.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -18,7 +19,13 @@ void regclass_graph_op_TensorIterator(py::module m) { 
"tensor_iterator"); cls.doc() = "openvino.impl.op.TensorIterator wraps ov::op::v0::TensorIterator"; cls.def(py::init<>()); - cls.def("set_body", &ov::op::v0::TensorIterator::set_body, py::arg("body")); + cls.def( + "set_body", + [](const std::shared_ptr& self, py::object& ie_api_model) { + const auto body = Common::utils::convert_to_model(ie_api_model); + self->set_body(body); + }, + py::arg("body")); cls.def("set_invariant_input", &ov::op::v0::TensorIterator::set_invariant_input, py::arg("body_parameter"), @@ -68,7 +75,8 @@ void regclass_graph_op_TensorIterator(py::module m) { cls.def( "set_function", - [](const std::shared_ptr& self, const std::shared_ptr& func) { + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto func = Common::utils::convert_to_model(ie_api_model); self->set_function(func); }, py::arg("func")); diff --git a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp index 9bd2833308db41..5fb4ddb4bd6dc8 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp @@ -35,10 +35,14 @@ void regclass_passes_Manager(py::module m) { :type new_state: bool )"); - manager.def("run_passes", - &ov::pass::Manager::run_passes, - py::arg("model"), - R"( + manager.def( + "run_passes", + [](ov::pass::Manager& self, const py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); + self.run_passes(model); + }, + py::arg("model"), + R"( Executes sequence of transformations on given Model. :param model: openvino.runtime.Model to be transformed. 
diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp index a19f2b2f482337..25fdd7b007a297 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp @@ -11,6 +11,7 @@ #include "openvino/core/node.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "pyopenvino/core/common.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -553,7 +554,14 @@ void regclass_graph_PrePostProcessor(py::module m) { "PrePostProcessor"); proc.doc() = "openvino.runtime.preprocess.PrePostProcessor wraps ov::preprocess::PrePostProcessor"; - proc.def(py::init&>(), py::arg("model")); + proc.def(py::init([](const py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); + return std::make_shared(model); + }), + py::arg("model"), + R"( + It creates PrePostProcessor. 
+ )"); proc.def("input", [](ov::preprocess::PrePostProcessor& self) { return &self.input(); @@ -591,7 +599,15 @@ void regclass_graph_PrePostProcessor(py::module m) { }, py::arg("output_index")); - proc.def("build", &ov::preprocess::PrePostProcessor::build, py::call_guard()); + proc.def("build", [](ov::preprocess::PrePostProcessor& self) { + std::shared_ptr model; + { + py::gil_scoped_release release; + model = self.build(); + } + py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + return model_class(py::cast(model)); + }); proc.def("__str__", [](const ov::preprocess::PrePostProcessor& self) -> std::string { std::stringstream ss; diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index ee3ef1c8b8144e..c385e5467224c0 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -98,10 +98,25 @@ PYBIND11_MODULE(_pyopenvino, m) { m.def("get_version", &get_version); m.def("get_batch", &ov::get_batch); - m.def("set_batch", &ov::set_batch); + m.def( + "get_batch", + [](const py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); + return ov::get_batch(model); + }, + py::arg("model")); + m.def( + "set_batch", + [](const py::object& ie_api_model, ov::Dimension value) { + auto model = Common::utils::convert_to_model(ie_api_model); + ov::set_batch(model, value); + }, + py::arg("model"), + py::arg("dimension")); m.def( "set_batch", - [](const std::shared_ptr& model, int64_t value) { + [](const py::object& ie_api_model, int64_t value) { + auto model = Common::utils::convert_to_model(ie_api_model); ov::set_batch(model, ov::Dimension(value)); }, py::arg("model"), @@ -109,10 +124,11 @@ PYBIND11_MODULE(_pyopenvino, m) { m.def( "serialize", - [](std::shared_ptr& model, + [](py::object& ie_api_model, const py::object& xml_path, const py::object& bin_path, const std::string& version) { + const 
auto model = Common::utils::convert_to_model(ie_api_model); ov::serialize(model, Common::utils::convert_path_to_string(xml_path), Common::utils::convert_path_to_string(bin_path), @@ -173,15 +189,9 @@ PYBIND11_MODULE(_pyopenvino, m) { m.def( "save_model", - [](std::shared_ptr& model, - const py::object& xml_path, - bool compress_to_fp16) { - if (model == nullptr) { - throw py::attribute_error("'model' argument is required and cannot be None."); - } - ov::save_model(model, - Common::utils::convert_path_to_string(xml_path), - compress_to_fp16); + [](py::object& ie_api_model, const py::object& xml_path, bool compress_to_fp16) { + const auto model = Common::utils::convert_to_model(ie_api_model); + ov::save_model(model, Common::utils::convert_path_to_string(xml_path), compress_to_fp16); }, py::arg("model"), py::arg("output_model"), diff --git a/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt b/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt index 94a1e62b7e1809..81d993b93f95a4 100644 --- a/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt @@ -39,7 +39,7 @@ endif() # perform copy add_custom_command(TARGET ${TARGET_NAME} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py + COMMAND ${CMAKE_COMMAND} -E copy_directory ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} ) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} @@ -53,7 +53,7 @@ install(TARGETS ${TARGET_NAME} LIBRARY DESTINATION tests/${OV_CPACK_PYTHONDIR}/openvino/test_utils COMPONENT tests EXCLUDE_FROM_ALL) -install(PROGRAMS ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils/__init__.py - DESTINATION tests/${OV_CPACK_PYTHONDIR}/openvino/test_utils +install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils + DESTINATION 
tests/${OV_CPACK_PYTHONDIR}/openvino COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index c747e2d3b81166..bd1520119bd8a9 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -311,6 +311,18 @@ std::string convert_path_to_string(const py::object& path) { OPENVINO_THROW(str.str()); } +std::shared_ptr convert_to_model(const py::object& obj) { + if (!py::isinstance(obj, py::module_::import("openvino").attr("Model"))) { + throw py::type_error("Incompatible `model` argument. Please provide a valid openvino.Model instance."); + } + auto model = obj.attr("_Model__model").cast>(); + if (model == nullptr) { + throw py::attribute_error("Invalid openvino.Model instance. It cannot be None. " + "Please make sure it is not used outside of its context."); + } + return model; +} + Version convert_to_version(const std::string& version) { if (version == "UNSPECIFIED") return Version::UNSPECIFIED; diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index 2a7b6505269535..224b70bb1fa176 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -81,6 +81,8 @@ class MemoryBuffer : public std::streambuf { std::string convert_path_to_string(const py::object& path); + std::shared_ptr convert_to_model(const py::object& obj); + void deprecation_warning(const std::string& function_name, const std::string& version = std::string(), const std::string& message = std::string(), int stacklevel=2); void raise_not_implemented(); diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index 0ae592b2d1dff5..425cdb97129c69 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ 
b/src/bindings/python/tests/test_runtime/test_model.py @@ -3,11 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import os +import sys import numpy as np import pytest import math from contextlib import nullcontext as does_not_raise from copy import copy +import tempfile import openvino.runtime.opset13 as ops from openvino import ( @@ -801,13 +803,49 @@ def test_model_add_remove_variable(): def test_save_model_with_none(): - with pytest.raises(AttributeError) as e: + with pytest.raises(TypeError) as e: save_model(model=None, output_model="model.xml") - assert "'model' argument is required and cannot be None." in str(e.value) + assert "Please provide a valid openvino.Model instance." in str(e.value) def test_copy_failed(): model = generate_add_model() with pytest.raises(TypeError) as e: copy(model) - assert "Cannot copy 'openvino.runtime.Model. Please, use deepcopy instead." in str(e.value) + assert "Cannot copy 'openvino.runtime.Model'. Please, use deepcopy instead." in str(e.value) + + +def test_model_attr_not_found(): + model = generate_add_model() + with pytest.raises(AttributeError) as e: + _ = model.not_found_attr + assert "'openvino._pyopenvino.Model' object has no attribute 'not_found_attr'" in str(e.value) + + +def test_model_with_statement(): + mem_model = generate_model_with_memory(input_shape=Shape([2, 1]), data_type=Type.f32) + with tempfile.TemporaryDirectory() as model_save_dir: + save_model(mem_model, f"{model_save_dir}/model.xml") + + with Core().read_model(f"{model_save_dir}/model.xml") as model: + assert mem_model.friendly_name == model.friendly_name + + with pytest.raises(AttributeError): + save_model(model, f"{model_save_dir}/model.xml") + + # Behavior after exiting the context manager + with mem_model as model: + pass + assert isinstance(mem_model, Model) + with pytest.raises(AttributeError, match="attribute is no longer accessible."): + model.friendly_name + + +@pytest.mark.skipif(sys.platform != "win32", reason="Windows only") +def 
test_tempdir_save_load_error(): + # Generate a model with stateful components, ensuring the .bin file will be non-empty after saving + mem_model = generate_model_with_memory(input_shape=Shape([2, 1]), data_type=Type.f32) + with pytest.raises((NotADirectoryError, PermissionError)): + with tempfile.TemporaryDirectory() as model_save_dir: + save_model(mem_model, f"{model_save_dir}/model.xml") + _ = Core().read_model(f"{model_save_dir}/model.xml") From 375ebd2f9a73d5baaff1c7a71199dad74fa3db13 Mon Sep 17 00:00:00 2001 From: Alicja Miloszewska Date: Fri, 20 Dec 2024 12:34:32 +0100 Subject: [PATCH 38/60] [Py OV] Replace imports of openvino.runtime in openvino module (#28166) ### Details: - Replace imports of openvino.runtime in openvino module ### Tickets: Part of [openvino.runtime deprecation](https://jira.devtools.intel.com/browse/CVS-129450) Signed-off-by: Alicja Miloszewska --- .../python/src/openvino/frontend/frontend.py | 2 +- .../src/openvino/frontend/jax/jaxpr_decoder.py | 2 +- .../python/src/openvino/frontend/jax/utils.py | 2 +- .../src/openvino/frontend/pytorch/fx_decoder.py | 2 +- .../openvino/frontend/pytorch/torchdynamo/backend.py | 4 ++-- .../frontend/pytorch/torchdynamo/backend_utils.py | 2 +- .../openvino/frontend/pytorch/torchdynamo/compile.py | 2 +- .../openvino/frontend/pytorch/torchdynamo/execute.py | 2 +- .../src/openvino/frontend/pytorch/ts_decoder.py | 4 ++-- .../python/src/openvino/frontend/pytorch/utils.py | 4 ++-- .../src/openvino/frontend/tensorflow/node_decoder.py | 2 +- .../python/src/openvino/frontend/tensorflow/utils.py | 2 +- src/bindings/python/src/openvino/helpers/packing.py | 2 +- src/bindings/python/src/openvino/opset1/ops.py | 12 ++++++------ src/bindings/python/src/openvino/opset10/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset11/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset12/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset13/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset14/ops.py | 10 
+++++----- src/bindings/python/src/openvino/opset15/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset16/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset2/ops.py | 11 +++++------ src/bindings/python/src/openvino/opset3/ops.py | 11 +++++------ src/bindings/python/src/openvino/opset4/ops.py | 11 +++++------ src/bindings/python/src/openvino/opset5/ops.py | 11 +++++------ src/bindings/python/src/openvino/opset6/ops.py | 8 ++++---- src/bindings/python/src/openvino/opset7/ops.py | 11 +++++------ src/bindings/python/src/openvino/opset8/ops.py | 10 +++++----- src/bindings/python/src/openvino/opset9/ops.py | 8 ++++---- .../preprocess/torchvision/preprocess_converter.py | 2 +- .../torchvision/torchvision_preprocessing.py | 8 ++++---- .../python/src/openvino/utils/broadcasting.py | 2 +- src/bindings/python/src/openvino/utils/decorators.py | 2 +- .../python/src/openvino/utils/input_validation.py | 2 +- .../python/src/openvino/utils/node_factory.py | 4 ++-- src/bindings/python/src/openvino/utils/reduction.py | 2 +- src/bindings/python/src/openvino/utils/types.py | 4 ++-- 37 files changed, 102 insertions(+), 107 deletions(-) diff --git a/src/bindings/python/src/openvino/frontend/frontend.py b/src/bindings/python/src/openvino/frontend/frontend.py index 4d549d24b4ef7c..6a16d5a573b7d7 100644 --- a/src/bindings/python/src/openvino/frontend/frontend.py +++ b/src/bindings/python/src/openvino/frontend/frontend.py @@ -7,7 +7,7 @@ from openvino._pyopenvino import FrontEnd as FrontEndBase from openvino._pyopenvino import FrontEndManager as FrontEndManagerBase from openvino._pyopenvino import InputModel -from openvino.runtime import Model +from openvino import Model class FrontEnd(FrontEndBase): diff --git a/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py b/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py index 914f6b2e2ee548..9072598f824939 100644 --- a/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py +++ 
b/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py @@ -6,7 +6,7 @@ import jax.core from openvino.frontend.jax.py_jax_frontend import _FrontEndJaxDecoder as Decoder -from openvino.runtime import PartialShape, Type as OVType, OVAny +from openvino import PartialShape, Type as OVType, OVAny from openvino.frontend.jax.utils import jax_array_to_ov_const, get_ov_type_for_value, \ ivalue_to_constant, param_to_constants diff --git a/src/bindings/python/src/openvino/frontend/jax/utils.py b/src/bindings/python/src/openvino/frontend/jax/utils.py index 4535265d6de082..659677b11d5af8 100644 --- a/src/bindings/python/src/openvino/frontend/jax/utils.py +++ b/src/bindings/python/src/openvino/frontend/jax/utils.py @@ -8,7 +8,7 @@ import jax.numpy as jnp import numpy as np from openvino.frontend.jax.passes import filter_element, filter_ivalue, filter_param -from openvino.runtime import op, Type as OVType, Shape, OVAny +from openvino import op, Type as OVType, Shape, OVAny numpy_to_ov_type_map = { np.float32: OVType.f32, diff --git a/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py index c448571f1ac17a..81a2764ee1188d 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py @@ -10,7 +10,7 @@ from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType -from openvino.runtime import PartialShape, Type as OVType, OVAny, Shape +from openvino import PartialShape, Type as OVType, OVAny, Shape from openvino.frontend.pytorch.utils import make_constant, fetch_attr, pt_to_ov_type_map, torch_tensor_to_ov_const logger = logging.getLogger(__name__) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py 
index 9f2ef019769875..a9a65781dcb254 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py @@ -18,7 +18,7 @@ from torch._decomp import decomposition_table, get_decompositions from openvino.frontend import FrontEndManager -from openvino.runtime import Core, Type, PartialShape +from openvino import Core, Type, PartialShape from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder from openvino.frontend.pytorch.torchdynamo import decompositions from openvino.frontend.pytorch.torchdynamo.decompositions import get_aot_decomposition_list, get_inf_decomposition_list @@ -27,7 +27,7 @@ from openvino.frontend.pytorch.torchdynamo.compile import cached_model_name, openvino_compile_cached_model from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_model_caching, _get_decompositions, _get_aot_autograd -from openvino.runtime import Core, Type, PartialShape +from openvino import Core, Type, PartialShape logger = logging.getLogger(__name__) logger.setLevel(logging.WARNING) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py index 47b3b82806b18b..c9a772b3feac42 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py @@ -5,7 +5,7 @@ # mypy: ignore-errors from typing import Optional, Any -from openvino.runtime import Core +from openvino import Core def _get_device(options) -> Optional[Any]: diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py index fa446893a05d07..ca8d5478e76c15 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py +++ 
b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py @@ -14,7 +14,7 @@ from openvino.frontend import FrontEndManager from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder -from openvino.runtime import Core, Type, PartialShape, serialize +from openvino import Core, Type, PartialShape, serialize from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_config, _is_cache_dir_in_config from typing import Callable, Optional diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py index 4f41f7b5a6a9de..7527ad7acb37a4 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py @@ -20,7 +20,7 @@ from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder from openvino.frontend.pytorch.torchdynamo.partition import Partitioner from openvino.frontend.pytorch.torchdynamo.compile import openvino_compile -from openvino.runtime import Core, Type, PartialShape +from openvino import Core, Type, PartialShape from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_aot_autograd from typing import Callable, Optional, Any diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 6d8fdb1658793e..7bb8073167a654 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -6,7 +6,7 @@ from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType -from openvino.runtime import op, PartialShape, Type as OVType, OVAny +from openvino import op, PartialShape, Type as OVType, 
OVAny from openvino.frontend.pytorch.utils import ( ivalue_to_constant, get_value_from_getattr, @@ -15,7 +15,7 @@ convert_quantized_tensor, graph_has_ops, ) -from openvino.runtime import opset11 as ops +from openvino import opset11 as ops from openvino.frontend.pytorch import quantized, patch_model from openvino.frontend.pytorch.module_extension import ModuleExtension diff --git a/src/bindings/python/src/openvino/frontend/pytorch/utils.py b/src/bindings/python/src/openvino/frontend/pytorch/utils.py index 826d766505fa79..9ba36707037c9e 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/utils.py @@ -7,8 +7,8 @@ import torch import numpy as np -from openvino.runtime import op, Type as OVType, Shape, Tensor -from openvino.runtime import opset11 as ops +from openvino import op, Type as OVType, Shape, Tensor +from openvino import opset11 as ops def make_constant(*args, **kwargs): diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py index fcedd7a74c2b51..d15262cbc30366 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py @@ -7,7 +7,7 @@ import numpy as np import tensorflow as tf from openvino.frontend.tensorflow.py_tensorflow_frontend import _FrontEndDecoderBase as DecoderBase -from openvino.runtime import PartialShape, Type, OVAny, Tensor +from openvino import PartialShape, Type, OVAny, Tensor def tf_type_to_ov_type(tf_type_int): diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py index 74c0dfff92297e..7de5dc950be53e 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py @@ -8,7 +8,7 @@ import logging as log import numpy as 
np import sys -from openvino.runtime import PartialShape, Dimension, Type +from openvino import PartialShape, Dimension, Type from packaging.version import parse, Version from typing import List, Dict, Union diff --git a/src/bindings/python/src/openvino/helpers/packing.py b/src/bindings/python/src/openvino/helpers/packing.py index 796af87402f3a6..d0956e09fc6261 100644 --- a/src/bindings/python/src/openvino/helpers/packing.py +++ b/src/bindings/python/src/openvino/helpers/packing.py @@ -5,7 +5,7 @@ import numpy as np from typing import Union -from openvino.runtime import Type, Shape +from openvino import Type, Shape def pack_data(array: np.ndarray, type: Type) -> np.ndarray: diff --git a/src/bindings/python/src/openvino/opset1/ops.py b/src/bindings/python/src/openvino/opset1/ops.py index edca6c62a0b246..e264aea304fb1f 100644 --- a/src/bindings/python/src/openvino/opset1/ops.py +++ b/src/bindings/python/src/openvino/opset1/ops.py @@ -8,17 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, PartialShape, Type +from openvino import Node, PartialShape, Type from openvino.op import Constant, Parameter, tensor_iterator -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset10/ops.py b/src/bindings/python/src/openvino/opset10/ops.py index c7b75777484a59..d0bc3cbf1cba4a 100644 
--- a/src/bindings/python/src/openvino/opset10/ops.py +++ b/src/bindings/python/src/openvino/opset10/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import List, Optional -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, as_node, diff --git a/src/bindings/python/src/openvino/opset11/ops.py b/src/bindings/python/src/openvino/opset11/ops.py index 575c99501d2d6c..95767b4800db1c 100644 --- a/src/bindings/python/src/openvino/opset11/ops.py +++ b/src/bindings/python/src/openvino/opset11/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import List, Optional -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, ) diff --git a/src/bindings/python/src/openvino/opset12/ops.py b/src/bindings/python/src/openvino/opset12/ops.py index 928bf4f71a9773..4b354b1fcff973 100644 --- a/src/bindings/python/src/openvino/opset12/ops.py +++ b/src/bindings/python/src/openvino/opset12/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import Optional -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import 
nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, as_node, diff --git a/src/bindings/python/src/openvino/opset13/ops.py b/src/bindings/python/src/openvino/opset13/ops.py index 12f0d06b1a28e6..5c6863740120f8 100644 --- a/src/bindings/python/src/openvino/opset13/ops.py +++ b/src/bindings/python/src/openvino/opset13/ops.py @@ -11,12 +11,12 @@ log = logging.getLogger(__name__) -from openvino.runtime import Node, Shape, Type, Output, Tensor +from openvino import Node, Shape, Type, Output, Tensor from openvino.op import Constant, Result from openvino.opset1 import convert_like -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op, overloading -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op, overloading +from openvino.utils.types import ( NumericData, NodeInput, NumericType, diff --git a/src/bindings/python/src/openvino/opset14/ops.py b/src/bindings/python/src/openvino/opset14/ops.py index fa872d24eb7f1a..59e1bfd3e89c6f 100644 --- a/src/bindings/python/src/openvino/opset14/ops.py +++ b/src/bindings/python/src/openvino/opset14/ops.py @@ -7,11 +7,11 @@ from typing import Union, Optional, List -from openvino.runtime import Node, Type -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.types import TensorShape -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import NodeInput, as_node, as_nodes +from openvino import Node, Type +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.types import TensorShape +from openvino.utils.decorators import nameable_op +from openvino.utils.types import NodeInput, as_node, as_nodes _get_node_factory_opset14 = partial(_get_node_factory, "opset14") diff --git a/src/bindings/python/src/openvino/opset15/ops.py 
b/src/bindings/python/src/openvino/opset15/ops.py index 8e6b8bd46d5f7c..97d4419fc4834b 100644 --- a/src/bindings/python/src/openvino/opset15/ops.py +++ b/src/bindings/python/src/openvino/opset15/ops.py @@ -7,12 +7,12 @@ from typing import List, Literal, Optional import numpy as np -from openvino.runtime import Node, Type +from openvino import Node, Type from openvino.opset1 import convert_like from openvino.opset14 import constant -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op -from openvino.runtime.utils.types import NodeInput, as_nodes +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op +from openvino.utils.types import NodeInput, as_nodes _get_node_factory_opset15 = partial(_get_node_factory, "opset15") diff --git a/src/bindings/python/src/openvino/opset16/ops.py b/src/bindings/python/src/openvino/opset16/ops.py index 60656f6d993b6a..e5ebdc7a2a11d6 100644 --- a/src/bindings/python/src/openvino/opset16/ops.py +++ b/src/bindings/python/src/openvino/opset16/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import Optional -from openvino.runtime import Node -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.types import NodeInput, as_nodes +from openvino import Node +from openvino.utils.decorators import nameable_op +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.types import NodeInput, as_nodes _get_node_factory_opset16 = partial(_get_node_factory, "opset16") diff --git a/src/bindings/python/src/openvino/opset2/ops.py b/src/bindings/python/src/openvino/opset2/ops.py index 45b33f5bc0288b..f76f608fe9a5c7 100644 --- a/src/bindings/python/src/openvino/opset2/ops.py +++ b/src/bindings/python/src/openvino/opset2/ops.py @@ -9,18 +9,17 @@ from functools import partial import 
warnings -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset3/ops.py b/src/bindings/python/src/openvino/opset3/ops.py index 989f5819acb685..1c2c7e309fe919 100644 --- a/src/bindings/python/src/openvino/opset3/ops.py +++ b/src/bindings/python/src/openvino/opset3/ops.py @@ -8,18 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset4/ops.py b/src/bindings/python/src/openvino/opset4/ops.py index 
4f6ba016852b02..e6f3a3a1550937 100644 --- a/src/bindings/python/src/openvino/opset4/ops.py +++ b/src/bindings/python/src/openvino/opset4/ops.py @@ -8,18 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset5/ops.py b/src/bindings/python/src/openvino/opset5/ops.py index 20057b78c7c31d..9217830752b1d8 100644 --- a/src/bindings/python/src/openvino/opset5/ops.py +++ b/src/bindings/python/src/openvino/opset5/ops.py @@ -8,18 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter, loop -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, 
_get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset6/ops.py b/src/bindings/python/src/openvino/opset6/ops.py index 8020715f20dea3..340d0405b4ba23 100644 --- a/src/bindings/python/src/openvino/opset6/ops.py +++ b/src/bindings/python/src/openvino/opset6/ops.py @@ -9,13 +9,13 @@ from functools import partial, singledispatch -from openvino.runtime import Node, Type, PartialShape, Output, Shape +from openvino import Node, Type, PartialShape, Output, Shape from openvino.op import assign, Constant, Parameter from openvino.op import read_value as _read_value from openvino.op.util import VariableInfo, Variable -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op, overloading -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op, overloading +from openvino.utils.types import ( NodeInput, NumericType, TensorShape, diff --git a/src/bindings/python/src/openvino/opset7/ops.py b/src/bindings/python/src/openvino/opset7/ops.py index 59e09b64888eb1..e33d266debedf1 100644 --- a/src/bindings/python/src/openvino/opset7/ops.py +++ b/src/bindings/python/src/openvino/opset7/ops.py @@ -7,18 +7,17 @@ from typing import Callable, Iterable, List, Optional, Set, Union import numpy as np -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory 
import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset8/ops.py b/src/bindings/python/src/openvino/opset8/ops.py index 6995d55a28a776..a9a868e7b541d8 100644 --- a/src/bindings/python/src/openvino/opset8/ops.py +++ b/src/bindings/python/src/openvino/opset8/ops.py @@ -9,15 +9,15 @@ import numpy as np from openvino.exceptions import UserInputError from openvino.op import Constant, Parameter, if_op -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.input_validation import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.input_validation import ( check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.types import ( +from openvino.utils.types import ( NodeInput, TensorShape, as_node, diff --git a/src/bindings/python/src/openvino/opset9/ops.py b/src/bindings/python/src/openvino/opset9/ops.py index a6d45cfd0be2cc..e2264845e058dc 100644 --- a/src/bindings/python/src/openvino/opset9/ops.py +++ b/src/bindings/python/src/openvino/opset9/ops.py @@ -7,10 +7,10 @@ from typing import Optional import numpy as np -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, as_node, diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py 
b/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py index c14635cc118208..717e945217468c 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py @@ -5,7 +5,7 @@ from typing import Callable, Any, Union import logging -import openvino.runtime as ov +import openvino as ov class PreprocessConverter(): diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py b/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py index f8b51afd546f57..5dad42b47da44a 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py @@ -20,10 +20,10 @@ import torchvision.transforms as transforms from torchvision.transforms import InterpolationMode -import openvino.runtime as ov -import openvino.runtime.opset11 as ops -from openvino.runtime import Layout, Type -from openvino.runtime.utils.decorators import custom_preprocess_function +import openvino as ov +import openvino.opset11 as ops +from openvino import Layout, Type +from openvino.utils.decorators import custom_preprocess_function from openvino.preprocess import PrePostProcessor, ResizeAlgorithm, ColorFormat diff --git a/src/bindings/python/src/openvino/utils/broadcasting.py b/src/bindings/python/src/openvino/utils/broadcasting.py index a950aea9bba820..01549625e2c628 100644 --- a/src/bindings/python/src/openvino/utils/broadcasting.py +++ b/src/bindings/python/src/openvino/utils/broadcasting.py @@ -5,7 +5,7 @@ import logging from typing import Optional -from openvino.runtime import AxisSet +from openvino import AxisSet from openvino.utils.types import ( TensorShape, ) diff --git a/src/bindings/python/src/openvino/utils/decorators.py b/src/bindings/python/src/openvino/utils/decorators.py index 
604c0745860bf6..9418c359d129e8 100644 --- a/src/bindings/python/src/openvino/utils/decorators.py +++ b/src/bindings/python/src/openvino/utils/decorators.py @@ -6,7 +6,7 @@ from inspect import signature from typing import Any, Callable, Dict, Optional, Union, get_origin, get_args -from openvino.runtime import Node, Output +from openvino import Node, Output from openvino.utils.types import NodeInput, as_node, as_nodes diff --git a/src/bindings/python/src/openvino/utils/input_validation.py b/src/bindings/python/src/openvino/utils/input_validation.py index e79a16c48581b1..1de08452e1da9f 100644 --- a/src/bindings/python/src/openvino/utils/input_validation.py +++ b/src/bindings/python/src/openvino/utils/input_validation.py @@ -9,7 +9,7 @@ import numpy as np -from openvino.runtime.exceptions import UserInputError +from openvino.exceptions import UserInputError log = logging.getLogger(__name__) diff --git a/src/bindings/python/src/openvino/utils/node_factory.py b/src/bindings/python/src/openvino/utils/node_factory.py index 9841daaea4e818..e999ae6988814a 100644 --- a/src/bindings/python/src/openvino/utils/node_factory.py +++ b/src/bindings/python/src/openvino/utils/node_factory.py @@ -9,9 +9,9 @@ from openvino._pyopenvino import NodeFactory as _NodeFactory -from openvino.runtime import Node, Output, Extension +from openvino import Node, Output, Extension -from openvino.runtime.exceptions import UserInputError +from openvino.exceptions import UserInputError DEFAULT_OPSET = "opset13" diff --git a/src/bindings/python/src/openvino/utils/reduction.py b/src/bindings/python/src/openvino/utils/reduction.py index 71d0af8de7376e..e6be6d0ac9a104 100644 --- a/src/bindings/python/src/openvino/utils/reduction.py +++ b/src/bindings/python/src/openvino/utils/reduction.py @@ -4,7 +4,7 @@ from typing import Iterable, Optional -from openvino.runtime import Node +from openvino import Node def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]: diff --git 
a/src/bindings/python/src/openvino/utils/types.py b/src/bindings/python/src/openvino/utils/types.py index 854cc0c7f6411d..b3543739741d94 100644 --- a/src/bindings/python/src/openvino/utils/types.py +++ b/src/bindings/python/src/openvino/utils/types.py @@ -9,8 +9,8 @@ import numpy as np -from openvino.runtime.exceptions import OVTypeError -from openvino.runtime import Node, Shape, Output, Type +from openvino.exceptions import OVTypeError +from openvino import Node, Shape, Output, Type from openvino.op import Constant log = logging.getLogger(__name__) From d8e276d8e3dbfef8abb3e12f521c81724e9b48a6 Mon Sep 17 00:00:00 2001 From: Yury Gaydaychuk Date: Fri, 20 Dec 2024 12:56:42 +0100 Subject: [PATCH 39/60] [Commit slider] LLM bench support (#28037) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- .../tools/commit_slider/utils/cfg.json | 1 + .../utils/cfg_samples/llm_bench.json | 72 ++++++++ .../tools/commit_slider/utils/modes.py | 154 ++++++++++++++++++ 3 files changed, 227 insertions(+) create mode 100644 src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/llm_bench.json diff --git a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg.json b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg.json index 3e70fbd9f98df1..18f2db370fb65b 100644 --- a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg.json +++ b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg.json @@ -4,6 +4,7 @@ "bmPerf" : "BenchmarkAppPerformanceMode", "compareBlobs" : "CompareBlobsMode", "ac" : "AccuracyCheckerMode", + "llmBench" : "LLMBenchMode", "nop" : "NopMode" }, "traversalMap" : { diff --git a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/llm_bench.json b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/llm_bench.json new file mode 100644 index 00000000000000..5ad25c48ec2ad9 --- /dev/null +++ b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/llm_bench.json @@ -0,0 +1,72 @@ +{ + "appCmd":"source {venvName}/bin/activate && cd {appPath} && 
python3.10 -m pip install --upgrade pip && python3.10 -m pip install openvino=={wheelVersion} openvino_genai=={wheelVersion} openvino_tokenizers=={wheelVersion} --find-links={precommitPath}wheels/ && python3.10 -m pip install -r requirements.txt && {cmd}", + "appPath" : "{appPath}", + "venvCfg":{ + "venvEnabled":true, + "venvDir":"{workPath}/venv/", + "venvName":"tempVenv" + }, + "commandList":[ + + ], + "runConfig":{ + "mode":"llmBench", + "traversal":"firstFailedVersion", + "perfAppropriateDeviation" : 0.05, + "commitList" : { + "getCommitListCmd" : "git log {c1}..{c2} --boundary --pretty=\"%h\"" + } + }, + "dlbConfig":{ + "launchedAsJob":false, + "toolName":"", + "wheelVersionsMap":{ + + }, + "commonPath":"{commitPath}", + "subPath":"{subPath}", + "appPath":"", + "appCmd":"" + }, + "cachedPathConfig":{ + "enabled":true, + "scheme":"mandatory", + "passCmdList":false, + "changeAppPath":false, + "commonPath":"{commitPath}", + "subPath":"{subPath}", + "cashMap":{ + + } + }, + "substitutionRules":[ + { + "name":"precommitPath", + "enabled":true, + "type":"map", + "placeholder":"precommitPath", + "from":"$.cachedPathConfig.cashMap", + "to":"$.appCmd" + }, + { + "name":"wheelVersion", + "enabled":true, + "type":"map", + "placeholder":"wheelVersion", + "from":"$.dlbConfig.wheelVersionsMap", + "to":"$.appCmd" + } + ], + "subscriptions":[ + { + "name":"wheelPathsMap", + "enabled":true + }, + { + "name":"wheelVersionsMap", + "enabled":true + } + ], + "verboseOutput" : true, + "checkIfBordersDiffer" : true + } \ No newline at end of file diff --git a/src/plugins/intel_cpu/tools/commit_slider/utils/modes.py b/src/plugins/intel_cpu/tools/commit_slider/utils/modes.py index 6c1024ef1234a9..9286bf2d33bc07 100644 --- a/src/plugins/intel_cpu/tools/commit_slider/utils/modes.py +++ b/src/plugins/intel_cpu/tools/commit_slider/utils/modes.py @@ -284,6 +284,160 @@ def getCommitInfo(self, commit): ci=super().getCommitInfo(commit), d=commit.perfRel) + +class LLMBenchMode(Mode): + def 
__init__(self, cfg): + super().__init__(cfg) + self.perfRel = 0 + self.createCash() + + def isPerformanceBased(self): + return True + + def prepareRun(self, list, cfg): + super().prepareRun(list, cfg) + sampleCommit = list[0] + sampleCommit = sampleCommit.replace('"', "") + self.commonLogger.info( + "Prepare sample commit - {commit}".format(commit=sampleCommit) + ) + commitLogger = getCommitLogger(cfg, sampleCommit) + foundThroughput = 0 + isCommitCashed, cashedThroughput = self.getCommitIfCashed(sampleCommit) + if isCommitCashed: + logMsg = "Cashed commit - {commit}".format(commit=sampleCommit) + self.commonLogger.info(logMsg) + commitLogger.info(logMsg) + foundThroughput = cashedThroughput + else: + handleCommit(sampleCommit, cfg) + output = fetchAppOutput(cfg, sampleCommit) + commitLogger.info(output) + foundThroughput = re.search( + self.outPattern, output, flags=re.MULTILINE + ).group(1) + self.setCommitCash(sampleCommit, float(foundThroughput)) + self.sampleThroughput = float(foundThroughput) + return list + + def checkCfg(self, cfg): + super().checkCfg(cfg) + if not ("perfAppropriateDeviation" in cfg["runConfig"]): + raise CfgError("Appropriate deviation is not configured") + else: + self.apprDev = cfg["runConfig"]["perfAppropriateDeviation"] + if ("metric" in cfg["runConfig"]): + self.outPattern = self.specifyMetric(cfg["runConfig"]["metric"]) + else: + self.outPattern = self.specifyMetric() + + + def specifyMetric(self, metric: str = "First token latency"): + if metric in [ + "First token latency"]: + res = r"First token latency:\s*([0-9]*[.][0-9]*)\s*ms/token" + return res + raise CfgError("Metric {} is not supported".format(metric)) + + def preliminaryCheck(self, list, cfg): + # # model path checking - todo is necessary ? 
+ # common if-degradation-exists check + super().preliminaryCheck(list, cfg) + + # performance - specific check if results for borders are stable, + isLeftStable = not cfg["preliminaryCheckCfg"]["leftCheck"] or\ + self.preliminaryStabilityCheck(list[0], cfg) + isRightStable = not cfg["preliminaryCheckCfg"]["rightCheck"] or\ + self.preliminaryStabilityCheck(list[-1], cfg) + if (not isLeftStable or not isRightStable): + raise PreliminaryAnalysisError( + "{lCommit} is {lStable}, {rCommit} is {rStable}".format( + lCommit=list[0], + rCommit=list[-1], + lStable="stable" if isLeftStable else "unstable", + rStable="stable" if isRightStable else "unstable" + ), + PreliminaryAnalysisError.PreliminaryErrType.UNSTABLE_APPLICATION + ) + + def compareCommits(self, lCommit: str, rCommit: str, cfg: map): + leftThroughput = self.getPseudoMetric(lCommit, cfg) + rightThroughput = self.getPseudoMetric(rCommit, cfg) + isBad, curRel = self.traversal.numericComparator( + leftThroughput, rightThroughput, self.apprDev + ) + if isBad: + self.perfRel = curRel + curCommit = rCommit.replace('"', "") + commitLogger = getCommitLogger(cfg, curCommit) + commitLogger.info("Performance relation is {rel}".format(rel=curRel)) + commitLogger.info( + "Commit is {status}".format(status=("bad" if isBad else "good")) + ) + return isBad + + def getPseudoMetric(self, commit, cfg): + commit = commit.replace('"', "") + curThroughput = 0 + commitLogger = getCommitLogger(cfg, commit) + isCommitCashed, cashedThroughput = self.getCommitIfCashed(commit) + pc = Mode.CommitPath.PathCommit( + commit, + Mode.CommitPath.CommitState.DEFAULT + ) + self.setOutputInfo(pc) + self.commitPath.accept(self.traversal, pc) + if isCommitCashed: + logMsg = "Cashed commit - {commit}".format(commit=commit) + self.commonLogger.info(logMsg) + commitLogger.info(logMsg) + curThroughput = cashedThroughput + else: + self.commonLogger.info("New commit: {commit}".format( + commit=commit) + ) + handleCommit(commit, cfg) + output = 
fetchAppOutput(cfg, commit) + commitLogger.info(output) + foundThroughput = re.search( + self.outPattern, output, flags=re.MULTILINE + ).group(1) + curThroughput = float(foundThroughput) + self.setCommitCash(commit, curThroughput) + return curThroughput + + def preliminaryStabilityCheck(self, commit, cfg): + commit = commit.replace('"', "") + curThroughput = 0 + + self.commonLogger.info( + "Preliminary check of commit: {commit}".format( + commit=commit) + ) + handleCommit(commit, cfg) + throughputList = [] + dev = self.apprDev = cfg["runConfig"]["perfAppropriateDeviation"] + for i in range(cfg["preliminaryCheckCfg"]["tryCount"]): + output = fetchAppOutput(cfg, commit) + foundThroughput = re.search( + self.outPattern, output, flags=re.MULTILINE + ).group(1) + curThroughput = float(foundThroughput) + throughputList.append(curThroughput) + resStable = checkStability(throughputList, dev) + if resStable: + self.setCommitCash(commit, curThroughput) + return resStable + + def setOutputInfo(self, pathCommit): + pathCommit.perfRel = self.perfRel + + def getCommitInfo(self, commit): + return "{ci}, perf. 
ratio = {d}".format( + ci=super().getCommitInfo(commit), + d=commit.perfRel) + + class AccuracyCheckerMode(Mode): def __init__(self, cfg): super().__init__(cfg) From 782f002626de6ec3565e0f62e8cebce336d24578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Krzemi=C5=84ski?= Date: Fri, 20 Dec 2024 13:44:04 +0100 Subject: [PATCH 40/60] [Spec] Add alignment mode description and Mersenne-Twister description to RandomUniform-8 (#26142) ### Details: - Added new alignment mode description - Added explanation of Mersenne-Twister algorithm with examples ### Tickets: - None --- .../generation/random-uniform-8.rst | 176 +++++++++++++++++- 1 file changed, 167 insertions(+), 9 deletions(-) diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/generation/random-uniform-8.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/generation/random-uniform-8.rst index 4013f2151a1b6f..26aad1eb161ace 100644 --- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/generation/random-uniform-8.rst +++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/generation/random-uniform-8.rst @@ -15,12 +15,20 @@ RandomUniform **Detailed description**: *RandomUniform* operation generates random numbers from a uniform distribution in the range ``[minval, maxval)``. -The generation algorithm is based on underlying random integer generator that uses Philox algorithm. Philox algorithm -is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns -four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized -with *global_seed* and *op_seed* attributes respectively. +The generation algorithm is based on an underlying random integer generator that uses either Philox or Mersenne-Twister algorithm. 
+Both algorithms are counter-based pseudo-random generators, which produce uint32 values. A single algorithm invocation returns +four result random values, depending on the given initial values. For Philox, these values are *key* and *counter*, for Mersenne-Twister it is a single *state* value. *Key* and *counter* are initialized +with *global_seed* and *op_seed* attributes respectively, while the *state* is only initialized using *global_seed*. -If both seed values equal to zero, RandomUniform generates non-deterministic sequence. +Algorithm selection allows to align the output of OpenVINO's Random Uniform op with the ones available in Tensorflow and PyTorch. +The *alignment* attribute selects which framework the output should be aligned to. Tensorflow uses the Philox algorithm and PyTorch uses the Mersenne-Twister algorithm. +For Tensorflow, this function is equivalent to the function tf.raw_ops.RandomUniform(shape, dtype, global_seed, op_seed) when dtype represents a real number, and tf.raw_ops.RandomUniformInt(shape, min\_val, max\_val, dtype, global\_seed, op\_seed) for integer types. Internally, both of these functions are executed by tf.random.uniform(shape, min\_val, max\_val, dtype, global\_seed, op\_seed), where for floating-point dtype the output goes through additional conversion to reside within a given range. +For PyTorch, this function is equivalent to the function torch.Tensor(shape, dtype).uniform\_(min\_val, max\_val) when dtype represents a real number, and torch.Tensor(shape, dtype).random\_(min\_val, max\_val) for integer types. Internally, both of these functions are executed by torch.rand(shape, dtype) with default generator and layout. The seed of these functions is provided by calling torch.manual\_seed(global\_seed). op\_seed value is ignored. +By default, the output is aligned with Tensorflow (Philox algorithm). This behavior is backwards-compatible. 
+ +If both seed values are equal to zero, RandomUniform generates a non-deterministic sequence. + +**Philox Algorithm Explanation**: .. math:: @@ -168,7 +176,7 @@ For integer values: where *x* is uint32 random value. -Example 1. *RandomUniform* output with ``global_seed`` = 150, ``op_seed`` = 10, ``output_type`` = f32: +Example 1. *RandomUniform* output with ``global_seed`` = 150, ``op_seed`` = 10, ``output_type`` = f32, ``alignment`` = TENSORFLOW: .. code-block:: xml :force: @@ -179,7 +187,7 @@ Example 1. *RandomUniform* output with ``global_seed`` = 150, ``op_seed`` = 10, [0.5197197 0.22727466 0.991374 ]] -Example 2. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, ``output_type`` = double: +Example 2. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, ``output_type`` = double, ``alignment`` = TENSORFLOW: .. code-block:: xml :force: @@ -194,7 +202,7 @@ Example 2. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, [2.67008206 2.36423758]] -Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, ``output_type`` = i32: +Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, ``output_type`` = i32, ``alignment`` = TENSORFLOW: .. code-block:: xml :force: @@ -208,6 +216,148 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, output = [[65 70 56] [59 82 92]] +------------------------------------------------------- + +Mersenne-Twister Algorithm Explanation: + +Link to the original paper Mersenne Twister: Mersenne twister: a 623-dimensionally equidistributed uniform pseudo-random number generator __. + +The Mersenne-Twister algorithm generates random numbers by initializing a state array with a seed and then iterating through a series of transformations. +Suppose we have n which determines the n-th element of the random sequence. + +The initial state array is generated recursively using the following formula: + +.. 
math:: + + state[0] = global_seed & 0xffffffff; + state[i] = 1812433253 * state[i-1] ^ (state[i-1] >> 30) + i + +where the value of i cannot exceed 623. + +The output is generated by tempering the state array: + +.. math:: + + y = state[i]\ + y = y \oplus (y >> u)\ + y = y \oplus ((y << s) & b)\ + y = y \oplus ((y << t) & c)\ + y = y \oplus (y >> l) + +where u, s, t, l, b, and c are constants. + +Whenever all state values are 'used', a new state array is generated recursively as follows: + +.. math:: + + current_state = state[i] + next_state = state[i+1] if i+1 <= 623 else state[0] + next_m_state = state[i+m] if i+m <= 623 else state[i+m-623] + + twisted_state = (((current_state & 0x80000000) | (next_state & 0x7fffffff)) >> 1) ^ (next_state & 1 ? 0x9908b0df : 0) + state[i] = next_m_state ^ twisted_state + +where m is a constant. + +For parity with PyTorch, the value of the constants is set as follows: + +.. math:: + + u = 11 + s = 7 + b = 0x9d2c5680 + t = 15 + c = 0xefc60000 + l = 18 + m = 397 + +These values follow the recommendations from the linked paper for MT19937. + +To convert a given unsigned int value (denoted as x below) to a specific type, a simple conversion is performed. +For float32: + +.. 
math:: + + mantissa_digits = 24 (mantissa / significand bits count of float + 1, equal to std::numeric_limits::digits == FLT_MANT_DIG == 24) + mask = uint32(uint64(1) << mantissa_digits - 1) + divisor = float(1) / (uint64(1) << mantissa_digits) + output = float((x & mask) * divisor) + +For float16: + + mantissa_digits = 11 (mantissa / significand bits count of float16 + 1, equal to 11) + mask = uint32(uint64(1) << mantissa_digits - 1) + divisor = float(1) / (uint64(1) << mantissa_digits) + output = float16((x & mask) * divisor) + +For bfloat16: + + mantissa_digits = 8 (mantissa / significand bits count of bfloat16 + 1, equal to 8) + mask = uint32(uint64(1) << mantissa_digits - 1) + divisor = float(1) / (uint64(1) << mantissa_digits) + output = bfloat16((x & mask) * divisor) + +For float64 (double precision requires the use of two uint32 values, denoted as x and y below): + + value = uint64(x) << 32 + y + + mantissa_digits = 53 (mantissa / significand bits count of double + 1, equal to std::numeric_limits::digits == DBL_MANT_DIG == 53) + mask = uint64(1) << mantissa_digits - 1 + divisor = double(1) / (uint64(1) << mantissa_digits) + output = double((x & mask) * divisor) + +All of the floating - point types above after the conversion fall between the values of 0 and 1. To convert them to reside between a range , a simple operation is performed: + +.. math:: + + output = x * (max - min) + min + +For integer types, no special conversion operation is done except for int64 when either min or max exceeds the maximum possible value of uint32. A simple operation to standardize the values is performed. +The special behavior (optimization) for int64 matches the expected output for PyTorch, normally a concatenation of 2 uint32s always occurs. +In other words: + +.. 
math:: + + if output is of int32 dtype: + output = int32(x) + else if output is of int64 dtype and (min <= max(uint32) and max <= max(uint32)): + output = int64(x) + else: + output = int64(x << 32 + y) (uses 2 uint32s instead of one) + + output = output % (max - min) + min + +Example 1. RandomUniform output with initial_seed = 150, output_type = f32, alignment = PYTORCH: +.. code-block:: xml + :force: + + input_shape = [ 3, 3 ] \\ + output = [[0.6789123 0.31274895 0.91842768] \\ + [0.9312087 0.13456984 0.49623574] \\ + [0.5082716 0.23938411 0.97856429]] + + +Example 2. RandomUniform output with initial_seed = 80, output_type = double, alignment = PYTORCH: + +.. code-block:: xml + :force: + + input_shape = [ 2, 2 ] \\ + minval = 2 \\ + maxval = 10 \\ + output = [[8.34928537 6.12348725] \\ + [3.76852914 2.89564172]] + +Example 3. RandomUniform output with initial_seed = 80, output_type = i32, alignment = PYTORCH: + +.. code-block:: xml + :force: + + input_shape = [ 2, 3 ] \\ + minval = 50 \\ + maxval = 100 \\ + output = [[89 73 68] \\ + [95 78 61]] **Attributes**: @@ -234,6 +384,14 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, * **Default value**: 0 * **Required**: *Yes* +* ``alignment`` + + * **Description**: the framework to align the output to. + * **Range of values**: TENSORFLOW, PYTORCH + * **Type**: `string` + * **Default value**: TENSORFLOW + * **Required**: *No* + **Inputs**: * **1**: ``shape`` - 1D tensor of type *T_SHAPE* describing output shape. **Required.** @@ -245,7 +403,7 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, **Outputs**: -* **1**: A tensor with type specified by the attribute *output_type* and shape defined by ``shape`` input tensor. +* **1**: A tensor with type specified by the attribute *output_type* and shape defined by ``shape`` input tensor, with values aligned to the framework selected by the ``alignment`` attribute. 
**Types** From 844809eb974a4b1a8affd1dc720b33bf9c9dd987 Mon Sep 17 00:00:00 2001 From: zm6int Date: Fri, 20 Dec 2024 21:03:38 +0800 Subject: [PATCH 41/60] [tools] [sit] Add feature to skip layers in SIT (#27852) - Add a command line option to skip checking layers when comparing output with SIT. ### Details: - Currently only suitable for NRMSE and RRMSE mode. ### Tickets: - EISW-148919 --------- Co-authored-by: Artemy Skrebkov --- .../tools/single-image-test/main.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/plugins/intel_npu/tools/single-image-test/main.cpp b/src/plugins/intel_npu/tools/single-image-test/main.cpp index 2454e9afd40e0d..699e252eacf181 100644 --- a/src/plugins/intel_npu/tools/single-image-test/main.cpp +++ b/src/plugins/intel_npu/tools/single-image-test/main.cpp @@ -95,6 +95,8 @@ DEFINE_string( DEFINE_string(data_shape, "", "Required for models with dynamic shapes. Set shape for input blobs. Only one shape can be set." "In case of one input size: \"[1,3,224,224]\""); +DEFINE_string(skip_output_layers, "" , "Skip output layers from the network. Currently only applicable for" + "RRMSE and NRMSE mode. Accept ';' separated list of output layers"); // for using input image mean and scale static constexpr char mean_values_message[] = @@ -247,6 +249,7 @@ void parseCommandLine(int argc, char* argv[]) { std::cout << " Performance counters: " << FLAGS_pc << std::endl; std::cout << " Mean_values [channel1,channel2,channel3] " << FLAGS_mean_values << std::endl; std::cout << " Scale_values [channel1,channel2,channel3] " << FLAGS_scale_values << std::endl; + std::cout << " Skip checking output layers: " << FLAGS_skip_output_layers << std::endl; if (FLAGS_run_test) { std::cout << " Reference files direcotry: " << (FLAGS_ref_dir.empty() ? 
"Current directory" : FLAGS_ref_dir) << std::endl; @@ -1329,7 +1332,15 @@ bool testRRMSE(const TensorMap& outputs, const TensorMap& references, size_t bat return false; } + std::vector skipped_layers; + skipped_layers = splitStringList(FLAGS_skip_output_layers, ';'); + for (const auto& [tensorName, output] : outputs) { + if (std::find(skipped_layers.begin(), skipped_layers.end(), tensorName) != skipped_layers.end()) { + std::cout << "Skip RRMSE test for layers: " << tensorName << std::endl; + continue; + } + auto referencesIterator = references.find(tensorName); OPENVINO_ASSERT(referencesIterator != references.end()); @@ -1397,7 +1408,15 @@ bool testNRMSE(const TensorMap& outputs, const TensorMap& references, size_t bat return false; } + std::vector skipped_layers; + skipped_layers = splitStringList(FLAGS_skip_output_layers, ';'); + for (const auto& [tensorName, output] : outputs) { + if (std::find(skipped_layers.begin(), skipped_layers.end(), tensorName) != skipped_layers.end()) { + std::cout << "Skip NRMSE test for layers: " << tensorName << std::endl; + continue; + } + auto referencesIterator = references.find(tensorName); OPENVINO_ASSERT(referencesIterator != references.end()); From 89f3fc97764fc42dfdd8e95ef40e3e97fe3a26c2 Mon Sep 17 00:00:00 2001 From: Yury Gaydaychuk Date: Fri, 20 Dec 2024 17:22:13 +0100 Subject: [PATCH 42/60] [CPU][GHA] CPU tests on riscV arch (#25673) ### Details: - *Implemented OV CPU tests pipeline with riscV architecture via Xuantie platform (fetched frombinaries, builded by @a-sidorova , sources https://github.com/XUANTIE-RV/xuantie-gnu-toolchain)*) - *todo: python support, building job reusing, implementing support python runner (i.e. 
using wrapping xuantie application instead of gtests directly)* Implementation of toolchain build - https://github.com/openvinotoolkit/openvino/pull/28142 ### Tickets: - *146964* --------- Co-authored-by: Mikhail Ryzhov --- .github/dockerfiles/docker_tag | 2 +- .../ov_build/ubuntu_22_04_riscv/Dockerfile | 5 +- .github/workflows/dev_cpu_linux_riscv.yml | 269 ++++++++++++++++++ 3 files changed, 274 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/dev_cpu_linux_riscv.yml diff --git a/.github/dockerfiles/docker_tag b/.github/dockerfiles/docker_tag index 1dc77e89521bfe..6e17dfb246030a 100644 --- a/.github/dockerfiles/docker_tag +++ b/.github/dockerfiles/docker_tag @@ -1 +1 @@ -pr-27882 +pr-25673 diff --git a/.github/dockerfiles/ov_build/ubuntu_22_04_riscv/Dockerfile b/.github/dockerfiles/ov_build/ubuntu_22_04_riscv/Dockerfile index 5911016b37d008..8b955def2aec00 100644 --- a/.github/dockerfiles/ov_build/ubuntu_22_04_riscv/Dockerfile +++ b/.github/dockerfiles/ov_build/ubuntu_22_04_riscv/Dockerfile @@ -62,10 +62,13 @@ RUN echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricte RUN dpkg --add-architecture riscv64 && \ apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/riscv64-sources.list && \ - apt-get install -y --no-install-recommends libpython3-dev:riscv64 + apt-get install -y --no-install-recommends libpython3-dev:riscv64 && \ + apt-get install libgomp1:riscv64 && \ + apt-get install libatomic1:riscv64 # Setup pip ENV PIP_VERSION="24.0" RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \ python3 get-pip.py --no-cache-dir pip==${PIP_VERSION} && \ rm -f get-pip.py + diff --git a/.github/workflows/dev_cpu_linux_riscv.yml b/.github/workflows/dev_cpu_linux_riscv.yml new file mode 100644 index 00000000000000..daeb42149539f8 --- /dev/null +++ b/.github/workflows/dev_cpu_linux_riscv.yml @@ -0,0 +1,269 @@ +name: Linux RISC-V CPU workflow with Xuantie (Ubuntu 22.04, Python 3.10) + +on: + workflow_dispatch: + 
inputs: + testFilter: + description: 'Filter for google tests' + required: true + default: '*smoke_AdaPoolAvg4DLayoutTest*' + pull_request: + paths: + - '.github/workflows/dev_cpu_linux_riscv.yml' + +env: + CMAKE_GENERATOR: 'Ninja Multi-Config' + XUANTIE_BIN_PATH: /mount/testdata1 + XUANTIE_DIR: /__w/openvino/xuantie + XUANTIE_TAR_NAME: 'Xuantie-900-gcc-linux-5.15.0-glibc-x86_64-V2.8.1' + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-dev-cpu-linux-riscv + cancel-in-progress: true + +permissions: read-all + +jobs: + Smart_CI: + runs-on: ubuntu-latest + outputs: + affected_components: "${{ steps.smart_ci.outputs.affected_components }}" + changed_components: "${{ steps.smart_ci.outputs.changed_components }}" + skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}" + steps: + - name: checkout action + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + sparse-checkout: .github/actions/smart-ci + + - name: Get affected components + id: smart_ci + uses: ./.github/actions/smart-ci + with: + repository: ${{ github.repository }} + pr: ${{ github.event.number }} + commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} + component_pattern: "category: (.*)" + repo_token: ${{ secrets.GITHUB_TOKEN }} + skip_when_only_listed_labels_set: 'docs' + skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg,*/layer_tests_summary/*,*/conformance/*' + + Docker: + needs: Smart_CI + runs-on: aks-linux-4-cores-16gb-docker-build + container: + image: openvinogithubactions.azurecr.io/docker_build:0.2 + volumes: + - /mount:/mount + outputs: + images: "${{ steps.handle_docker.outputs.images }}" + steps: + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - uses: ./.github/actions/handle_docker + id: handle_docker + with: + images: | + ov_build/ubuntu_22_04_riscv + registry: 'openvinogithubactions.azurecr.io' 
+ dockerfiles_root_dir: '.github/dockerfiles' + changed_components: ${{ needs.smart_ci.outputs.changed_components }} + + Build: + needs: [Smart_CI, Docker] + timeout-minutes: 150 + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores-32gb + container: + image: ${{ fromJSON(needs.docker.outputs.images).ov_build.ubuntu_22_04_riscv }} + volumes: + - /mount:/mount + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + GITHUB_WORKSPACE: '/__w/openvino/openvino' + OPENVINO_REPO: /__w/openvino/openvino/openvino + INSTALL_DIR: /__w/openvino/openvino/openvino_install + INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install + BUILD_DIR: /__w/openvino/openvino/openvino_build + CCACHE_REMOTE_DIR: /mount/caches/ccache/ubuntu22_riscv64_xuantie/${{ github.base_ref || github.ref_name }} + CCACHE_DIR: /__w/openvino/openvino/ccache + CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp + CCACHE_MAXSIZE: 2G + if: "!needs.smart_ci.outputs.skip_workflow" + + steps: + - name: Clone OpenVINO + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + path: ${{ env.OPENVINO_REPO }} + submodules: 'true' + + # + # Print system info + # + + - name: System info + uses: ./openvino/.github/actions/system_info + + - name: Setup ccache + id: ccache_restore + uses: ./openvino/.github/actions/cache + with: + save-always: ${{ github.event_name == 'push' && 'true' || 'false' }} + cache-size: 10 + max-cache-size: 50 + cache-path: ${{ env.CCACHE_REMOTE_DIR }} + path: ${{ env.CCACHE_DIR }} + key: ${{ runner.os }}-${{ runner.arch }}-ccache-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-${{ runner.arch }}-ccache + + - name: Init XUANTIE + run: | + mkdir ${XUANTIE_DIR} + tar -xvf ${XUANTIE_BIN_PATH}/${XUANTIE_TAR_NAME}.tar -C ${XUANTIE_DIR} + chmod -R +x ${XUANTIE_DIR}/${XUANTIE_TAR_NAME} + + - name: Clean ccache stats + run: ccache --zero-stats + + - name: CMake configure - OpenVINO + run: | + cmake \ + -G 
"${CMAKE_GENERATOR}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_TESTS=ON \ + -DENABLE_INTEL_CPU=ON \ + -DENABLE_INTEL_GPU=OFF \ + -DENABLE_INTEL_NPU=OFF \ + -DENABLE_SAMPLES=OFF \ + -DCMAKE_TOOLCHAIN_FILE=${OPENVINO_REPO}/cmake/toolchains/riscv64-071-xuantie-gnu.toolchain.cmake \ + -DRISCV_TOOLCHAIN_ROOT=${XUANTIE_DIR}/${XUANTIE_TAR_NAME} \ + -DENABLE_PYTHON=OFF \ + -DENABLE_PYTHON_PACKAGING=ON \ + -DENABLE_WHEEL=OFF \ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DCPACK_GENERATOR=TGZ \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -S ${OPENVINO_REPO} \ + -B ${BUILD_DIR} + + - name: Cmake build - OpenVINO + run: cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} + + - name: Show ccache stats + run: ccache --show-stats + + - name: Cmake install - OpenVINO + run: | + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_TEST_DIR} -DCOMPONENT=tests -P ${BUILD_DIR}/cmake_install.cmake + + - name: Pack Artifacts + run: | + + pushd ${INSTALL_DIR} + tar -czvf ${BUILD_DIR}/openvino_package.tar.gz * + popd + + pushd ${INSTALL_TEST_DIR} + tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz * + popd + + # # + # # Upload build artifacts + # # + + - name: Upload openvino package + if: ${{ always() }} + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: openvino_package + path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz + if-no-files-found: 'error' + + - name: Upload openvino tests package + if: ${{ always() }} + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: openvino_tests + path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz + if-no-files-found: 'error' + + - name: Clean ccache + run: ccache --cleanup + + 
CPU_Functional_Tests: + name: CPU functional tests + needs: [ Docker, Build, Smart_CI ] + timeout-minutes: 30 + runs-on: aks-linux-4-cores-16gb + container: + image: ${{ fromJSON(needs.docker.outputs.images).ov_build.ubuntu_22_04_riscv }} + volumes: + - /mount:/mount + defaults: + run: + shell: bash + env: + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + GTEST_FILTER: ${{ github.event_name == 'workflow_dispatch' && inputs.testFilter || '*smoke_AdaPoolAvg4DLayoutTest*' }} + + steps: + - name: Init XUANTIE + run: | + mkdir ${XUANTIE_DIR} + tar -xvf ${XUANTIE_BIN_PATH}/${XUANTIE_TAR_NAME}.tar -C ${XUANTIE_DIR} + chmod -R +x ${XUANTIE_DIR}/${XUANTIE_TAR_NAME} + + - name: Download OpenVINO package + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd $INSTALL_DIR + tar -xzf openvino_package.tar.gz -C $INSTALL_DIR + popd + + pushd $INSTALL_TEST_DIR + tar -xzf openvino_tests.tar.gz -C $INSTALL_TEST_DIR + popd + + - name: Intel CPU plugin func tests + run: | + # Needed as the Linux CC does not require setupvars to work + if [[ -f "${INSTALL_DIR}/setupvars.sh" ]]; then + source ${INSTALL_DIR}/setupvars.sh + fi + # Needed as ze_loader.so is under INSTALL_TEST_DIR + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${INSTALL_TEST_DIR}/tests + ${XUANTIE_DIR}/${XUANTIE_TAR_NAME}/bin/qemu-riscv64 -cpu c910v 
${INSTALL_TEST_DIR}/tests/ov_cpu_func_tests --gtest_filter=${{ env.GTEST_FILTER }} --gtest_print_time=1 + timeout-minutes: 25 + From 9edb68b24bf32c33c692890185620123b6d98770 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Fri, 20 Dec 2024 18:22:52 +0100 Subject: [PATCH 43/60] [RTTI] Apply OPENVINO_MATCHER_PASS_RTTI in transformations (#28152) ### Details: - Applied OPENVINO_MATCHER_PASS_RTTI helper macro in Transformations (except in op_convertions - done in #28150). ### Tickets: - CVS-159567 --------- Signed-off-by: Tomasz Jankowski Co-authored-by: Michal Lukaszewski --- .../adaptive_pool_to_reduce.hpp | 2 +- .../add_fake_quantize_fusion.hpp | 2 +- .../align_eltwise_input_ranks.hpp | 2 +- .../augru_cell_fusion.hpp | 2 +- .../batch_to_space_fusion.hpp | 2 +- .../common_optimizations/binarize_weights.hpp | 2 +- .../broadcast_elementwise_fusion.hpp | 2 +- .../broadcast_transition.hpp | 2 +- .../common_optimizations/clamp_fusion.hpp | 2 +- .../compress_float_constants.hpp | 2 +- .../common_optimizations/concat_fusion.hpp | 2 +- .../concat_reduce_fusion.hpp | 4 +-- .../concat_to_broadcast.hpp | 4 +-- .../common_optimizations/conv_mul_fusion.hpp | 8 ++--- .../conv_to_binary_conv.hpp | 2 +- .../convert_quantize_dequantize.hpp | 2 +- ...onvert_u4_weights_zero_point_to_scalar.hpp | 2 +- ...onvolution_to_group_convolution_fusion.hpp | 2 +- .../depth_to_space_fusion.hpp | 2 +- .../dilated_convolution_converter.hpp | 2 +- ...isable_random_uniform_constant_folding.hpp | 2 +- .../disable_shapeof_constant_folding.hpp | 2 +- .../common_optimizations/divide_fusion.hpp | 2 +- .../dropout_with_random_uniform_replacer.hpp | 2 +- .../eliminate_duplicate_ti_inputs.hpp | 2 +- .../eliminate_loop_inputs_outputs.hpp | 2 +- .../eliminate_unsqueeze_gather.hpp | 4 +-- .../enable_shapeof_constant_folding.hpp | 2 +- .../fold_subgraph_empty_inputs.hpp | 2 +- .../common_optimizations/fq_mul_fusion.hpp | 2 +- .../fq_reshape_fusion.hpp | 2 +- .../fuse_rotary_positional_embeddings.hpp | 18 
+++++----- .../common_optimizations/gelu_fusion.hpp | 12 +++---- .../common_optimizations/glu_fusion.hpp | 2 +- .../common_optimizations/gru_cell_fusion.hpp | 2 +- .../common_optimizations/hsigmoid_fusion.hpp | 10 +++--- .../common_optimizations/hswish_fusion.hpp | 8 ++--- .../interpolate_sequence_fusion.hpp | 2 +- .../leaky_relu_fusion.hpp | 2 +- .../lin_op_sequence_fusion.hpp | 6 ++-- .../lora_subgraph_fusion.hpp | 2 +- .../common_optimizations/lstm_cell_fusion.hpp | 4 +-- ..._rope_input_to_keep_in_mixed_precision.hpp | 4 +-- .../matmul_const_transposes_extraction.hpp | 2 +- .../matmul_multiply_fusion.hpp | 2 +- .../common_optimizations/mish_fusion.hpp | 2 +- .../move_eltwise_up_data_movement.hpp | 4 +-- .../common_optimizations/mul_conv_fusion.hpp | 8 ++--- .../mul_fake_quantize_fusion.hpp | 2 +- .../common_optimizations/mvn_fusion.hpp | 4 +-- .../nearest_neighbor_upsampling_fusion.hpp | 2 +- .../nonzero_horizontal_fusion.hpp | 2 +- .../common_optimizations/nop_elimination.hpp | 36 +++++++++---------- .../normalize_l2_fusion.hpp | 2 +- .../optimize_strided_slice.hpp | 2 +- .../common_optimizations/pad_fusion.hpp | 10 +++--- .../common_optimizations/prelu_fusion.hpp | 12 +++---- .../pull_through_reduce.hpp | 4 +-- .../pull_transpose_through_fq.hpp | 2 +- .../random_uniform_fusion.hpp | 2 +- .../common_optimizations/reduce_merge.hpp | 2 +- .../reduce_reshape_fusion.hpp | 2 +- .../relu_fake_quantize_fusion.hpp | 2 +- .../remove_concat_zero_dim_input.hpp | 2 +- .../remove_filtering_boxes_by_size.hpp | 2 +- .../common_optimizations/reshape_prelu.hpp | 2 +- .../reshape_sequence_fusion.hpp | 2 +- .../common_optimizations/rms_fusion.hpp | 2 +- .../select_with_one_value_condition.hpp | 2 +- .../common_optimizations/sequence_fusion.hpp | 2 +- .../shuffle_channels_fusion.hpp | 2 +- .../simplify_shape_of_sub_graph.hpp | 10 +++--- ...ip_gather_before_transpose_and_reshape.hpp | 2 +- .../common_optimizations/softmax_fusion.hpp | 2 +- 
.../common_optimizations/softplus_fusion.hpp | 2 +- .../softplus_to_mish_fusion.hpp | 2 +- .../space_to_batch_fusion.hpp | 2 +- ...plit_concat_pair_to_interpolate_fusion.hpp | 2 +- .../split_squeeze_concat_fusion.hpp | 2 +- .../strides_optimization.hpp | 6 ++-- .../common_optimizations/subtract_fusion.hpp | 2 +- .../common_optimizations/swish_fusion.hpp | 8 ++--- ...anspose_reshape_elimination_for_matmul.hpp | 2 +- .../transpose_sinking.hpp | 10 +++--- .../transpose_to_reshape.hpp | 2 +- .../weights_dequantize_to_fake_quantize.hpp | 2 +- .../wrap_interpolate_into_transposes.hpp | 2 +- .../flush_fp32_subnormals_to_zero.hpp | 2 +- ...decompression_convert_constant_folding.hpp | 10 +++--- .../mark_floatpoint_range.hpp | 4 +-- .../mark_dequantization_subgraph.hpp | 4 +-- .../position_ids_replacer.hpp | 4 +-- .../prev_sequence_length_pattern.hpp | 4 +-- .../state_management_pattern.hpp | 4 +-- .../total_sequence_length_pattern.hpp | 2 +- .../broadcast_const_range_replacement.hpp | 2 +- .../smart_reshape/matmul_sr.hpp | 6 ++-- .../proposal_scales_stridedslice.hpp | 4 +-- .../smart_reshape/reshape_sinking.hpp | 2 +- .../smart_reshape/reshape_to_1D.hpp | 2 +- .../smart_reshape/shape_of_const_folding.hpp | 2 +- .../smart_reshape/strided_slice_squeeze.hpp | 4 +-- .../chained_maximum.hpp | 4 +-- .../dereshape_matmul.hpp | 4 +-- .../nop_broadcast.hpp | 4 +-- .../reshape_optimizations.hpp | 2 +- .../symbolic_optimizations.hpp | 2 +- .../transpose_sinking/ts_base.hpp | 2 +- .../transpose_sinking/ts_binary.hpp | 2 +- .../transpose_sinking/ts_concat.hpp | 2 +- .../transpose_sinking/ts_cumsum.hpp | 2 +- .../transpose_sinking/ts_data_movement.hpp | 2 +- .../transpose_sinking/ts_fuse.hpp | 4 +-- .../transpose_sinking/ts_gather.hpp | 4 +-- .../transpose_sinking/ts_interpolate.hpp | 2 +- .../transpose_sinking/ts_reduction.hpp | 4 +-- .../ts_reset_no_sinking_attribute.hpp | 2 +- .../transpose_sinking/ts_slice.hpp | 2 +- .../transpose_sinking/ts_split.hpp | 2 +- 
.../transpose_sinking/ts_squeeze.hpp | 2 +- .../transpose_sinking/ts_tile.hpp | 2 +- .../transpose_sinking/ts_unary.hpp | 2 +- .../transpose_sinking/ts_unsqueeze.hpp | 2 +- .../convert_nms_gather_path_to_unsigned.cpp | 6 ++-- .../common_optimizations/ric_fusion.cpp | 14 ++++++++ ...k_subgraphs_to_keep_in_mixed_precision.cpp | 14 ++++---- .../convert_ti_to_sequences.cpp | 1 + 127 files changed, 242 insertions(+), 227 deletions(-) diff --git a/src/common/transformations/include/transformations/common_optimizations/adaptive_pool_to_reduce.hpp b/src/common/transformations/include/transformations/common_optimizations/adaptive_pool_to_reduce.hpp index 06dde1ff0bbf63..be271bc71f4b55 100644 --- a/src/common/transformations/include/transformations/common_optimizations/adaptive_pool_to_reduce.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/adaptive_pool_to_reduce.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API AdaptivePoolToReduce; class ov::pass::AdaptivePoolToReduce : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AdaptivePoolToReduce", "0"); + OPENVINO_MATCHER_PASS_RTTI("AdaptivePoolToReduce"); AdaptivePoolToReduce(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/add_fake_quantize_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/add_fake_quantize_fusion.hpp index f63f00a455a117..3dc06301c838f2 100644 --- a/src/common/transformations/include/transformations/common_optimizations/add_fake_quantize_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/add_fake_quantize_fusion.hpp @@ -27,6 +27,6 @@ class TRANSFORMATIONS_API AddFakeQuantizeFusion; */ class ov::pass::AddFakeQuantizeFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AddFakeQuantizeFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("AddFakeQuantizeFusion"); AddFakeQuantizeFusion(); }; diff --git 
a/src/common/transformations/include/transformations/common_optimizations/align_eltwise_input_ranks.hpp b/src/common/transformations/include/transformations/common_optimizations/align_eltwise_input_ranks.hpp index 1a7578d3551903..c18f1c96bc41bc 100644 --- a/src/common/transformations/include/transformations/common_optimizations/align_eltwise_input_ranks.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/align_eltwise_input_ranks.hpp @@ -17,7 +17,7 @@ namespace pass { class TRANSFORMATIONS_API AlignEltwiseInputRanks : public MatcherPass { public: - OPENVINO_RTTI("AlignEltwiseInputRanks", "0"); + OPENVINO_MATCHER_PASS_RTTI("AlignEltwiseInputRanks"); AlignEltwiseInputRanks(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/augru_cell_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/augru_cell_fusion.hpp index c6773de5ec894f..2d458e5e18f87f 100644 --- a/src/common/transformations/include/transformations/common_optimizations/augru_cell_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/augru_cell_fusion.hpp @@ -32,6 +32,6 @@ class TRANSFORMATIONS_API AUGRUCellFusion; class ov::pass::AUGRUCellFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AUGRUCellFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("AUGRUCellFusion"); AUGRUCellFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/batch_to_space_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/batch_to_space_fusion.hpp index 7b05f721f52400..4ab3210a633798 100644 --- a/src/common/transformations/include/transformations/common_optimizations/batch_to_space_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/batch_to_space_fusion.hpp @@ -31,6 +31,6 @@ class TRANSFORMATIONS_API BatchToSpaceFusion; class ov::pass::BatchToSpaceFusion : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("BatchToSpaceFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("BatchToSpaceFusion"); BatchToSpaceFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/binarize_weights.hpp b/src/common/transformations/include/transformations/common_optimizations/binarize_weights.hpp index 3803281478a055..2e975e27b26f97 100644 --- a/src/common/transformations/include/transformations/common_optimizations/binarize_weights.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/binarize_weights.hpp @@ -76,6 +76,6 @@ class TRANSFORMATIONS_API BinarizeWeights; class ov::pass::BinarizeWeights : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BinarizeWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("BinarizeWeights"); BinarizeWeights(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/broadcast_elementwise_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/broadcast_elementwise_fusion.hpp index 38ae4799e932a0..0fffa95e62b04c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/broadcast_elementwise_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/broadcast_elementwise_fusion.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API BroadcastElementwiseFusion; class ov::pass::BroadcastElementwiseFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BroadcastElementwiseFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("BroadcastElementwiseFusion"); BroadcastElementwiseFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/broadcast_transition.hpp b/src/common/transformations/include/transformations/common_optimizations/broadcast_transition.hpp index 089f1472b7c431..ff9b8151e7e47f 100644 --- a/src/common/transformations/include/transformations/common_optimizations/broadcast_transition.hpp +++ 
b/src/common/transformations/include/transformations/common_optimizations/broadcast_transition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API BroadcastTransition; */ class ov::pass::BroadcastTransition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BroadcastTransition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BroadcastTransition"); BroadcastTransition(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/clamp_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/clamp_fusion.hpp index fe966323edbb98..69870870d8758a 100644 --- a/src/common/transformations/include/transformations/common_optimizations/clamp_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/clamp_fusion.hpp @@ -29,6 +29,6 @@ class TRANSFORMATIONS_API ClampFusion; class ov::pass::ClampFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ClampFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ClampFusion"); ClampFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp b/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp index 77bac5753bd757..d8e0eb8c154766 100644 --- a/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp @@ -25,7 +25,7 @@ bool TRANSFORMATIONS_API is_model_optimized(const std::shared_ptr& mo */ class ov::pass::CompressFloatConstantsImpl : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CompressFloatConstantsImpl", "0"); + OPENVINO_MATCHER_PASS_RTTI("CompressFloatConstantsImpl"); /// @brief Transformation constructor /// @param postponed If true then the transformation won't compress the constants /// keeping them in the original type but still will insert Converts. 
This is diff --git a/src/common/transformations/include/transformations/common_optimizations/concat_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/concat_fusion.hpp index 31b710722bf13f..2642e0cc35c39c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/concat_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/concat_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ConcatFusion; class ov::pass::ConcatFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConcatFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConcatFusion"); ConcatFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp index 4aa5391dd42618..ae02a2a50b4a2b 100644 --- a/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp @@ -24,7 +24,7 @@ class TRANSFORMATIONS_API ConcatReduceFusion; */ class ov::pass::ReplaceConcatReduceByMinOrMax : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReplaceConcatReduceByMinOrMax", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReplaceConcatReduceByMinOrMax"); ReplaceConcatReduceByMinOrMax(); }; @@ -34,7 +34,7 @@ class ov::pass::ReplaceConcatReduceByMinOrMax : public ov::pass::MatcherPass { */ class ov::pass::PullSqueezeThroughEltwise : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PullSqueezeThroughEltwise", "0"); + OPENVINO_MATCHER_PASS_RTTI("PullSqueezeThroughEltwise"); PullSqueezeThroughEltwise(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/concat_to_broadcast.hpp b/src/common/transformations/include/transformations/common_optimizations/concat_to_broadcast.hpp index 
ddb20338c0b01d..7859673f08e309 100644 --- a/src/common/transformations/include/transformations/common_optimizations/concat_to_broadcast.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/concat_to_broadcast.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConcatToBroadcast; */ class ov::pass::ConcatToBroadcast : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConcatToBroadcast", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConcatToBroadcast"); ConcatToBroadcast(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp index 9e2ba5194367ba..0ae7cc67bd4be4 100644 --- a/src/common/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp @@ -23,24 +23,24 @@ class TRANSFORMATIONS_API GroupConvolutionBackpropDataMultiplyFusion; class ov::pass::ConvolutionMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvolutionMultiplyFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvolutionMultiplyFusion"); ConvolutionMultiplyFusion(); }; class ov::pass::GroupConvolutionMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GroupConvolutionMultiplyFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("GroupConvolutionMultiplyFusion"); GroupConvolutionMultiplyFusion(); }; class ov::pass::ConvolutionBackpropDataMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvolutionBackpropDataMultiplyFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvolutionBackpropDataMultiplyFusion"); ConvolutionBackpropDataMultiplyFusion(); }; class ov::pass::GroupConvolutionBackpropDataMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GroupConvolutionBackpropDataMultiplyFusion", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("GroupConvolutionBackpropDataMultiplyFusion"); GroupConvolutionBackpropDataMultiplyFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/conv_to_binary_conv.hpp b/src/common/transformations/include/transformations/common_optimizations/conv_to_binary_conv.hpp index a60a15d37be9e7..b87013067ca18c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/conv_to_binary_conv.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/conv_to_binary_conv.hpp @@ -72,6 +72,6 @@ class TRANSFORMATIONS_API ConvToBinaryConv; */ class ov::pass::ConvToBinaryConv : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvToBinaryConv", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvToBinaryConv"); ConvToBinaryConv(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/convert_quantize_dequantize.hpp b/src/common/transformations/include/transformations/common_optimizations/convert_quantize_dequantize.hpp index 7c0b6b5be95d5d..7dc5639d5e7cf4 100644 --- a/src/common/transformations/include/transformations/common_optimizations/convert_quantize_dequantize.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/convert_quantize_dequantize.hpp @@ -31,6 +31,6 @@ class TRANSFORMATIONS_API ConvertQuantizeDequantize; class ov::pass::ConvertQuantizeDequantize : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertQuantizeDequantize", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertQuantizeDequantize"); ConvertQuantizeDequantize(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/convert_u4_weights_zero_point_to_scalar.hpp b/src/common/transformations/include/transformations/common_optimizations/convert_u4_weights_zero_point_to_scalar.hpp index dfd9eef8069665..f20716cbdc7023 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/convert_u4_weights_zero_point_to_scalar.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/convert_u4_weights_zero_point_to_scalar.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertU4WeightsZeroPointToScalar; */ class ov::pass::ConvertU4WeightsZeroPointToScalar : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertU4WeightsZeroPointToScalar", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertU4WeightsZeroPointToScalar"); ConvertU4WeightsZeroPointToScalar(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/convolution_to_group_convolution_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/convolution_to_group_convolution_fusion.hpp index a99d4ea801d8ec..b6b909f23afc18 100644 --- a/src/common/transformations/include/transformations/common_optimizations/convolution_to_group_convolution_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/convolution_to_group_convolution_fusion.hpp @@ -24,7 +24,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvolutionToGroupConvolutionFusion : public MatcherPass { public: - OPENVINO_RTTI("ConvolutionToGroupConvolutionFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvolutionToGroupConvolutionFusion"); ConvolutionToGroupConvolutionFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/depth_to_space_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/depth_to_space_fusion.hpp index 53e6b623d67d5a..2aa5b8f0efa8c1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/depth_to_space_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/depth_to_space_fusion.hpp @@ -42,6 +42,6 @@ class TRANSFORMATIONS_API DepthToSpaceFusion; class ov::pass::DepthToSpaceFusion : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("DepthToSpaceFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("DepthToSpaceFusion"); DepthToSpaceFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/dilated_convolution_converter.hpp b/src/common/transformations/include/transformations/common_optimizations/dilated_convolution_converter.hpp index 204190cafd305c..f946a1792c0323 100644 --- a/src/common/transformations/include/transformations/common_optimizations/dilated_convolution_converter.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/dilated_convolution_converter.hpp @@ -29,6 +29,6 @@ class TRANSFORMATIONS_API DilatedConvolutionConverter; class ov::pass::DilatedConvolutionConverter : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DilatedConvolutionConverter", "0"); + OPENVINO_MATCHER_PASS_RTTI("DilatedConvolutionConverter"); DilatedConvolutionConverter(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp b/src/common/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp index 405e8ff7288e5d..de0840c0dce97f 100644 --- a/src/common/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/disable_random_uniform_constant_folding.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API DisableRandomUniformConstantFolding; */ class ov::pass::DisableRandomUniformConstantFolding : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DisableRandomUniformConstantFolding", "0"); + OPENVINO_MATCHER_PASS_RTTI("DisableRandomUniformConstantFolding"); DisableRandomUniformConstantFolding(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/disable_shapeof_constant_folding.hpp 
b/src/common/transformations/include/transformations/common_optimizations/disable_shapeof_constant_folding.hpp index 1526fea966347d..912c21a29ba7c6 100644 --- a/src/common/transformations/include/transformations/common_optimizations/disable_shapeof_constant_folding.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/disable_shapeof_constant_folding.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API DisableShapeOfConstantFolding; class ov::pass::DisableShapeOfConstantFolding : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DisableShapeOfConstantFolding", "0"); + OPENVINO_MATCHER_PASS_RTTI("DisableShapeOfConstantFolding"); explicit DisableShapeOfConstantFolding(bool check_shape = true); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/divide_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/divide_fusion.hpp index 37a5b1f66a5551..fb9f8d5b122110 100644 --- a/src/common/transformations/include/transformations/common_optimizations/divide_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/divide_fusion.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API DivideFusion; */ class ov::pass::DivideFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DivideFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("DivideFusion"); DivideFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/dropout_with_random_uniform_replacer.hpp b/src/common/transformations/include/transformations/common_optimizations/dropout_with_random_uniform_replacer.hpp index 2ad17f308d46bc..6fdfaa7cc7caa1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/dropout_with_random_uniform_replacer.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/dropout_with_random_uniform_replacer.hpp @@ -36,6 +36,6 @@ class TRANSFORMATIONS_API DropoutWithRandomUniformReplacer; 
*/ class ov::pass::DropoutWithRandomUniformReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DropoutWithRandomUniformReplacer", "0"); + OPENVINO_MATCHER_PASS_RTTI("DropoutWithRandomUniformReplacer"); DropoutWithRandomUniformReplacer(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/eliminate_duplicate_ti_inputs.hpp b/src/common/transformations/include/transformations/common_optimizations/eliminate_duplicate_ti_inputs.hpp index 058daba182b8a6..d9b7db1d08519b 100644 --- a/src/common/transformations/include/transformations/common_optimizations/eliminate_duplicate_ti_inputs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/eliminate_duplicate_ti_inputs.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API EliminateDuplicateTIInputs; class ov::pass::EliminateDuplicateTIInputs : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateDuplicateTIInputs", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateDuplicateTIInputs"); EliminateDuplicateTIInputs(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/eliminate_loop_inputs_outputs.hpp b/src/common/transformations/include/transformations/common_optimizations/eliminate_loop_inputs_outputs.hpp index ca58bb504fd811..818e312e91f3d2 100644 --- a/src/common/transformations/include/transformations/common_optimizations/eliminate_loop_inputs_outputs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/eliminate_loop_inputs_outputs.hpp @@ -30,6 +30,6 @@ class TRANSFORMATIONS_API EliminateLoopInputsOutputs; class ov::pass::EliminateLoopInputsOutputs : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateLoopInputsOutputs", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateLoopInputsOutputs"); EliminateLoopInputsOutputs(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/eliminate_unsqueeze_gather.hpp 
b/src/common/transformations/include/transformations/common_optimizations/eliminate_unsqueeze_gather.hpp index b0ce4581a25569..95f52601c031ab 100644 --- a/src/common/transformations/include/transformations/common_optimizations/eliminate_unsqueeze_gather.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/eliminate_unsqueeze_gather.hpp @@ -24,7 +24,7 @@ class TRANSFORMATIONS_API EliminateGatherUnsqueeze; class ov::pass::EliminateUnsqueezeGather : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateUnsqueezeGather", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateUnsqueezeGather"); EliminateUnsqueezeGather(); }; @@ -38,6 +38,6 @@ class ov::pass::EliminateUnsqueezeGather : public ov::pass::MatcherPass { class ov::pass::EliminateGatherUnsqueeze : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateGatherUnsqueeze", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateGatherUnsqueeze"); EliminateGatherUnsqueeze(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/enable_shapeof_constant_folding.hpp b/src/common/transformations/include/transformations/common_optimizations/enable_shapeof_constant_folding.hpp index ab515ce65ac83b..dc81277841570d 100644 --- a/src/common/transformations/include/transformations/common_optimizations/enable_shapeof_constant_folding.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/enable_shapeof_constant_folding.hpp @@ -17,7 +17,7 @@ namespace pass { */ class TRANSFORMATIONS_API EnableShapeOfConstantFolding : public MatcherPass { public: - OPENVINO_RTTI("EnableShapeOfConstantFolding", "0"); + OPENVINO_MATCHER_PASS_RTTI("EnableShapeOfConstantFolding"); explicit EnableShapeOfConstantFolding(bool check_shape = true); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp 
b/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp index 8775d93644456e..89024746ee7181 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp @@ -34,7 +34,7 @@ TRANSFORMATIONS_API bool fold_subgraph_empty_inputs_is_disabled(const std::share class ov::pass::FoldSubgraphEmptyInputs : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FoldSubgraphEmptyInputs", "0"); + OPENVINO_MATCHER_PASS_RTTI("FoldSubgraphEmptyInputs"); FoldSubgraphEmptyInputs(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/fq_mul_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/fq_mul_fusion.hpp index 3b48e9da740269..d3215c8cb7168d 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fq_mul_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fq_mul_fusion.hpp @@ -27,6 +27,6 @@ class TRANSFORMATIONS_API FakeQuantizeMulFusion; class ov::pass::FakeQuantizeMulFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FakeQuantizeMulFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("FakeQuantizeMulFusion"); FakeQuantizeMulFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/fq_reshape_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/fq_reshape_fusion.hpp index e4004d794d8ec7..361872e80b1d60 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fq_reshape_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fq_reshape_fusion.hpp @@ -27,6 +27,6 @@ class TRANSFORMATIONS_API FakeQuantizeReshapeFusion; class ov::pass::FakeQuantizeReshapeFusion : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("FakeQuantizeReshapeFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("FakeQuantizeReshapeFusion"); FakeQuantizeReshapeFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp b/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp index 3449151ab93ac5..8c45842b274dd5 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp @@ -26,55 +26,55 @@ class TRANSFORMATIONS_API RoPEShareCosSin; class ov::pass::RoPEFusionGPTNEOX : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionGPTNEOX", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionGPTNEOX"); RoPEFusionGPTNEOX(); }; class ov::pass::RoPEFusionFlux : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionFlux", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionFlux"); RoPEFusionFlux(); }; class ov::pass::RoPEFusionGPTJ : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionGPTJ", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionGPTJ"); RoPEFusionGPTJ(); }; class ov::pass::RoPEFusionChatGLM : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionChatGLM", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionChatGLM"); RoPEFusionChatGLM(int split_output_id, const bool support_2d_rope = false); }; class ov::pass::RoPEFusionQwen : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionQwen", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionQwen"); RoPEFusionQwen(int split_output_id); }; class ov::pass::RoPEFusionIOSlicing : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionIOSlicing", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionIOSlicing"); RoPEFusionIOSlicing(); }; class ov::pass::RoPEFusionPreprocess : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("RoPEFusionPreprocess", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionPreprocess"); RoPEFusionPreprocess(); }; class ov::pass::RoPEFusionCosSinPreprocess : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEFusionCosSinPreprocess", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEFusionCosSinPreprocess"); RoPEFusionCosSinPreprocess(); }; class ov::pass::RoPEShareCosSin : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RoPEShareCosSin", "0"); + OPENVINO_MATCHER_PASS_RTTI("RoPEShareCosSin"); RoPEShareCosSin(); private: diff --git a/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp index c3e50b7c992a53..1388cda8268e17 100644 --- a/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp @@ -31,7 +31,7 @@ class TRANSFORMATIONS_API GeluFusionWithTanhNoPower; */ class ov::pass::GeluFusionWithErfOne : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GeluFusionWithErfOne", "0"); + OPENVINO_MATCHER_PASS_RTTI("GeluFusionWithErfOne"); GeluFusionWithErfOne(); }; @@ -42,7 +42,7 @@ class ov::pass::GeluFusionWithErfOne : public ov::pass::MatcherPass { */ class ov::pass::GeluFusionWithErfTwo : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GeluFusionWithErfTwo", "0"); + OPENVINO_MATCHER_PASS_RTTI("GeluFusionWithErfTwo"); GeluFusionWithErfTwo(); }; @@ -53,7 +53,7 @@ class ov::pass::GeluFusionWithErfTwo : public ov::pass::MatcherPass { */ class ov::pass::GeluFusionWithErfThree : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GeluFusionWithErfThree", "0"); + OPENVINO_MATCHER_PASS_RTTI("GeluFusionWithErfThree"); GeluFusionWithErfThree(); }; @@ -64,7 +64,7 @@ class ov::pass::GeluFusionWithErfThree : public ov::pass::MatcherPass { */ class ov::pass::GeluFusionWithErfFour : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("GeluFusionWithErfFour", "0"); + OPENVINO_MATCHER_PASS_RTTI("GeluFusionWithErfFour"); GeluFusionWithErfFour(); }; @@ -75,7 +75,7 @@ class ov::pass::GeluFusionWithErfFour : public ov::pass::MatcherPass { */ class ov::pass::GeluFusionWithTanh : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GeluFusionWithTanh", "0"); + OPENVINO_MATCHER_PASS_RTTI("GeluFusionWithTanh"); GeluFusionWithTanh(); }; @@ -86,7 +86,7 @@ class ov::pass::GeluFusionWithTanh : public ov::pass::MatcherPass { */ class ov::pass::GeluFusionWithTanhNoPower : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GeluFusionWithTanhNoPower", "0"); + OPENVINO_MATCHER_PASS_RTTI("GeluFusionWithTanhNoPower"); GeluFusionWithTanhNoPower(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/glu_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/glu_fusion.hpp index 7ec71a05027d80..e55a76e031f8f6 100644 --- a/src/common/transformations/include/transformations/common_optimizations/glu_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/glu_fusion.hpp @@ -13,7 +13,7 @@ namespace pass { class TRANSFORMATIONS_API GLUFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GLUFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("GLUFusion"); GLUFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/gru_cell_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/gru_cell_fusion.hpp index 50bc5ac370a74c..12031de6fc1c92 100644 --- a/src/common/transformations/include/transformations/common_optimizations/gru_cell_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/gru_cell_fusion.hpp @@ -35,6 +35,6 @@ class TRANSFORMATIONS_API GRUCellFusion; class ov::pass::GRUCellFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GRUCellFusion", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("GRUCellFusion"); GRUCellFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp index 04841837a5a76b..72da0b538b9336 100644 --- a/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp @@ -29,7 +29,7 @@ class TRANSFORMATIONS_API HSigmoidFusionWithClampDiv; */ class ov::pass::HSigmoidFusionWithReluDiv : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidFusionWithReluDiv", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidFusionWithReluDiv"); HSigmoidFusionWithReluDiv(); }; @@ -39,7 +39,7 @@ class ov::pass::HSigmoidFusionWithReluDiv : public ov::pass::MatcherPass { */ class ov::pass::HSigmoidFusionWithReluMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidFusionWithReluMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidFusionWithReluMul"); HSigmoidFusionWithReluMul(); }; @@ -49,7 +49,7 @@ class ov::pass::HSigmoidFusionWithReluMul : public ov::pass::MatcherPass { */ class ov::pass::HSigmoidFusionWithoutRelu : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidFusionWithoutRelu", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidFusionWithoutRelu"); HSigmoidFusionWithoutRelu(); }; @@ -59,7 +59,7 @@ class ov::pass::HSigmoidFusionWithoutRelu : public ov::pass::MatcherPass { */ class ov::pass::HSigmoidFusionWithClampMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidFusionWithClampMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidFusionWithClampMul"); HSigmoidFusionWithClampMul(); }; @@ -69,7 +69,7 @@ class ov::pass::HSigmoidFusionWithClampMul : public ov::pass::MatcherPass { */ class ov::pass::HSigmoidFusionWithClampDiv : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidFusionWithClampDiv", 
"0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidFusionWithClampDiv"); HSigmoidFusionWithClampDiv(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp index ede2769ec278f1..40a91cc4f08a9d 100644 --- a/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp @@ -28,7 +28,7 @@ class TRANSFORMATIONS_API HSwishFusionWithClamp; */ class ov::pass::HSwishFusionWithReluDiv : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSwishFusionWithReluDiv", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSwishFusionWithReluDiv"); HSwishFusionWithReluDiv(); }; @@ -38,7 +38,7 @@ class ov::pass::HSwishFusionWithReluDiv : public ov::pass::MatcherPass { */ class ov::pass::HSwishFusionWithReluMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSwishFusionWithReluMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSwishFusionWithReluMul"); HSwishFusionWithReluMul(); }; @@ -48,7 +48,7 @@ class ov::pass::HSwishFusionWithReluMul : public ov::pass::MatcherPass { */ class ov::pass::HSwishFusionWithHSigmoid : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSwishFusionWithHSigmoid", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSwishFusionWithHSigmoid"); HSwishFusionWithHSigmoid(); }; @@ -58,7 +58,7 @@ class ov::pass::HSwishFusionWithHSigmoid : public ov::pass::MatcherPass { */ class ov::pass::HSwishFusionWithClamp : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSwishFusionWithClamp", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSwishFusionWithClamp"); HSwishFusionWithClamp(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/interpolate_sequence_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/interpolate_sequence_fusion.hpp index 
ff7495e5a2d405..af6ba9055b45e1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/interpolate_sequence_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/interpolate_sequence_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API InterpolateSequenceFusion; */ class ov::pass::InterpolateSequenceFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("InterpolateSequenceFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("InterpolateSequenceFusion"); InterpolateSequenceFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/leaky_relu_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/leaky_relu_fusion.hpp index 42abc74e0dbc2a..93765f2e39c2ef 100644 --- a/src/common/transformations/include/transformations/common_optimizations/leaky_relu_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/leaky_relu_fusion.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API LeakyReluFusion; class ov::pass::LeakyReluFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LeakyReluFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("LeakyReluFusion"); LeakyReluFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp index 2e239e40c332b9..091deb3e32e58f 100644 --- a/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp @@ -23,19 +23,19 @@ class TRANSFORMATIONS_API MultiplyMultiplyFusion; class ov::pass::AddMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AddMultiplyFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("AddMultiplyFusion"); AddMultiplyFusion(); }; class 
ov::pass::AddAddFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AddAddFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("AddAddFusion"); AddAddFusion(); }; class ov::pass::MultiplyMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MultiplyMultiplyFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MultiplyMultiplyFusion"); MultiplyMultiplyFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/lora_subgraph_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/lora_subgraph_fusion.hpp index 8422ad95f262c6..e31f99fdb63872 100644 --- a/src/common/transformations/include/transformations/common_optimizations/lora_subgraph_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/lora_subgraph_fusion.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API LoraSubgraphFusion; class ov::pass::LoraSubgraphFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LoraSubgraphFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("LoraSubgraphFusion"); LoraSubgraphFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp index 947e39edcfd0e0..1bc5344b31e09b 100644 --- a/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp @@ -24,7 +24,7 @@ class TRANSFORMATIONS_API LSTMCellFusionWithSplitWeights; */ class ov::pass::LSTMCellFusionWithJointWeights : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LSTMCellFusionWithJointWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("LSTMCellFusionWithJointWeights"); LSTMCellFusionWithJointWeights(); }; @@ -35,7 +35,7 @@ class ov::pass::LSTMCellFusionWithJointWeights : public ov::pass::MatcherPass { */ class 
ov::pass::LSTMCellFusionWithSplitWeights : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LSTMCellFusionWithSplitWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("LSTMCellFusionWithSplitWeights"); LSTMCellFusionWithSplitWeights(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mark_rope_input_to_keep_in_mixed_precision.hpp b/src/common/transformations/include/transformations/common_optimizations/mark_rope_input_to_keep_in_mixed_precision.hpp index c555b991de07ba..09db8db879dbeb 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mark_rope_input_to_keep_in_mixed_precision.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mark_rope_input_to_keep_in_mixed_precision.hpp @@ -27,7 +27,7 @@ namespace pass { class TRANSFORMATIONS_API MarkRopeInputsToKeepInMixedPrecision : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MarkRopeInputsToKeepInMixedPrecision", "0"); + OPENVINO_MATCHER_PASS_RTTI("MarkRopeInputsToKeepInMixedPrecision"); MarkRopeInputsToKeepInMixedPrecision(); private: @@ -35,4 +35,4 @@ class TRANSFORMATIONS_API MarkRopeInputsToKeepInMixedPrecision : public ov::pass }; } // namespace pass -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/transformations/include/transformations/common_optimizations/matmul_const_transposes_extraction.hpp b/src/common/transformations/include/transformations/common_optimizations/matmul_const_transposes_extraction.hpp index 7626be5f877527..313f5cd244a32e 100644 --- a/src/common/transformations/include/transformations/common_optimizations/matmul_const_transposes_extraction.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/matmul_const_transposes_extraction.hpp @@ -18,7 +18,7 @@ namespace pass { class TRANSFORMATIONS_API MatMulConstTransposesExtraction : public MatcherPass { public: - OPENVINO_RTTI("MatMulConstTransposesExtraction", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("MatMulConstTransposesExtraction"); MatMulConstTransposesExtraction(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/matmul_multiply_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/matmul_multiply_fusion.hpp index 7994a04be7972d..767f44a8393e74 100644 --- a/src/common/transformations/include/transformations/common_optimizations/matmul_multiply_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/matmul_multiply_fusion.hpp @@ -58,6 +58,6 @@ class TRANSFORMATIONS_API MatMulMultiplyFusion; */ class ov::pass::MatMulMultiplyFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MatMulMultiplyFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MatMulMultiplyFusion"); MatMulMultiplyFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mish_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/mish_fusion.hpp index 8dc6e0149c881f..c905ee2f336232 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mish_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mish_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API MishFusion; */ class ov::pass::MishFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MishFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MishFusion"); MishFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp b/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp index 4f704b089190a4..d691eee7c29795 100644 --- a/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp @@ -29,7 +29,7 
@@ namespace pass { /// └────────────────┘ └────────────────┘ class TRANSFORMATIONS_API MoveEltwiseUpThroughDataMovScalar : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveEltwiseUpThroughDataMovScalar", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveEltwiseUpThroughDataMovScalar"); MoveEltwiseUpThroughDataMovScalar(std::vector allowed_data_movement_ops); }; @@ -50,7 +50,7 @@ class TRANSFORMATIONS_API MoveEltwiseUpThroughDataMovScalar : public ov::pass::M /// └────────────────┘ └────────────────────┘ └───────────┘ └─────────────┘ class TRANSFORMATIONS_API MoveEltwiseUpThroughDataMovPerChannel : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveEltwiseUpThroughDataMovPerChannel", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveEltwiseUpThroughDataMovPerChannel"); MoveEltwiseUpThroughDataMovPerChannel(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mul_conv_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/mul_conv_fusion.hpp index 4fae74debc1014..84fe28e512549c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mul_conv_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mul_conv_fusion.hpp @@ -80,24 +80,24 @@ class TRANSFORMATIONS_API MultiplyGroupConvolutionBackpropDataFusion; class ov::pass::MultiplyConvolutionFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MultiplyConvolutionFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MultiplyConvolutionFusion"); MultiplyConvolutionFusion(); }; class ov::pass::MultiplyGroupConvolutionFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MultiplyGroupConvolutionFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MultiplyGroupConvolutionFusion"); MultiplyGroupConvolutionFusion(); }; class ov::pass::MultiplyConvolutionBackpropDataFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MultiplyConvolutionBackpropDataFusion", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("MultiplyConvolutionBackpropDataFusion"); MultiplyConvolutionBackpropDataFusion(); }; class ov::pass::MultiplyGroupConvolutionBackpropDataFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MultiplyGroupConvolutionBackpropDataFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MultiplyGroupConvolutionBackpropDataFusion"); MultiplyGroupConvolutionBackpropDataFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mul_fake_quantize_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/mul_fake_quantize_fusion.hpp index f66e52f82c6c0e..e5afe2b7ace09c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mul_fake_quantize_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mul_fake_quantize_fusion.hpp @@ -27,6 +27,6 @@ class TRANSFORMATIONS_API MulFakeQuantizeFusion; */ class ov::pass::MulFakeQuantizeFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MulFakeQuantizeFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MulFakeQuantizeFusion"); MulFakeQuantizeFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp index dc8561d89e18ab..4a625816261c9b 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp @@ -28,7 +28,7 @@ class TRANSFORMATIONS_API MVNFusionWithConstantsInside; */ class ov::pass::MVNFusionWithoutConstants : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MVNFusionWithoutConstants", "0"); + OPENVINO_MATCHER_PASS_RTTI("MVNFusionWithoutConstants"); MVNFusionWithoutConstants(); }; @@ -40,7 +40,7 @@ class ov::pass::MVNFusionWithoutConstants : public ov::pass::MatcherPass { */ class 
ov::pass::MVNFusionWithConstantsInside : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MVNFusionWithConstantsInside", "0"); + OPENVINO_MATCHER_PASS_RTTI("MVNFusionWithConstantsInside"); MVNFusionWithConstantsInside(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/nearest_neighbor_upsampling_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/nearest_neighbor_upsampling_fusion.hpp index 04b45ee6c67cd3..338e48afd77f64 100644 --- a/src/common/transformations/include/transformations/common_optimizations/nearest_neighbor_upsampling_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/nearest_neighbor_upsampling_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API NearestNeighborUpsamplingFusion; */ class ov::pass::NearestNeighborUpsamplingFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NearestNeighborUpsamplingFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("NearestNeighborUpsamplingFusion"); NearestNeighborUpsamplingFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/nonzero_horizontal_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/nonzero_horizontal_fusion.hpp index d579cb0c8a9ec1..e0ad6ccb98453a 100644 --- a/src/common/transformations/include/transformations/common_optimizations/nonzero_horizontal_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/nonzero_horizontal_fusion.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API NonZeroHorizontalFusion; */ class ov::pass::NonZeroHorizontalFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NonZeroHorizontalFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("NonZeroHorizontalFusion"); NonZeroHorizontalFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp 
b/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp index 76190906d157e2..ea7d428bc1eea7 100644 --- a/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp @@ -39,7 +39,7 @@ class TRANSFORMATIONS_API PrepareShapeOpsForEliminationAroundBE; */ class ov::pass::EliminateReduceReshape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateReduceReshape", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateReduceReshape"); EliminateReduceReshape(); }; @@ -49,7 +49,7 @@ class ov::pass::EliminateReduceReshape : public ov::pass::MatcherPass { */ class ov::pass::EliminatePad : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminatePad", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminatePad"); EliminatePad(); }; @@ -59,7 +59,7 @@ class ov::pass::EliminatePad : public ov::pass::MatcherPass { */ class ov::pass::EliminateConvert : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateConvert", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateConvert"); EliminateConvert(); }; @@ -69,7 +69,7 @@ class ov::pass::EliminateConvert : public ov::pass::MatcherPass { */ class ov::pass::EliminateConvertNonZero : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateConvertNonZero", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateConvertNonZero"); EliminateConvertNonZero(); }; @@ -79,7 +79,7 @@ class ov::pass::EliminateConvertNonZero : public ov::pass::MatcherPass { */ class ov::pass::EliminateConcat : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateConcat", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateConcat"); EliminateConcat(); }; @@ -89,7 +89,7 @@ class ov::pass::EliminateConcat : public ov::pass::MatcherPass { */ class ov::pass::EliminateSplit : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateSplit", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateSplit"); 
EliminateSplit(); }; @@ -99,7 +99,7 @@ class ov::pass::EliminateSplit : public ov::pass::MatcherPass { */ class ov::pass::EliminateSqueeze : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateSqueeze", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateSqueeze"); EliminateSqueeze(); }; @@ -109,7 +109,7 @@ class ov::pass::EliminateSqueeze : public ov::pass::MatcherPass { */ class ov::pass::EliminateUnsqueeze : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateUnsqueeze", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateUnsqueeze"); EliminateUnsqueeze(); }; @@ -119,7 +119,7 @@ class ov::pass::EliminateUnsqueeze : public ov::pass::MatcherPass { */ class ov::pass::EliminateTranspose : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateTranspose", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateTranspose"); EliminateTranspose(); }; @@ -129,7 +129,7 @@ class ov::pass::EliminateTranspose : public ov::pass::MatcherPass { */ class ov::pass::EliminateEltwise : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateEltwise", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateEltwise"); EliminateEltwise(); }; @@ -139,7 +139,7 @@ class ov::pass::EliminateEltwise : public ov::pass::MatcherPass { */ class ov::pass::EliminateScatterUpdate : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateScatterUpdate", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateScatterUpdate"); EliminateScatterUpdate(); }; @@ -155,7 +155,7 @@ class ov::pass::NopElimination : public GraphRewrite { */ class ov::pass::EliminateSplitConcat : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateSplitConcat", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateSplitConcat"); EliminateSplitConcat(); }; @@ -165,7 +165,7 @@ class ov::pass::EliminateSplitConcat : public ov::pass::MatcherPass { */ class ov::pass::EliminateNopBroadcast : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateNopBroadcast", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("EliminateNopBroadcast"); EliminateNopBroadcast(); }; @@ -177,7 +177,7 @@ class ov::pass::EliminateNopBroadcast : public ov::pass::MatcherPass { */ class ov::pass::EliminateSliceBeforeGatherElements : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateSliceBeforeGatherElements", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateSliceBeforeGatherElements"); EliminateSliceBeforeGatherElements(); }; @@ -188,7 +188,7 @@ class ov::pass::EliminateSliceBeforeGatherElements : public ov::pass::MatcherPas */ class ov::pass::EliminateStridedSlice : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateStridedSlice", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateStridedSlice"); EliminateStridedSlice(); }; @@ -199,7 +199,7 @@ class ov::pass::EliminateStridedSlice : public ov::pass::MatcherPass { */ class ov::pass::EliminateSlice : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateSlice", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateSlice"); EliminateSlice(); }; @@ -210,7 +210,7 @@ class ov::pass::EliminateSlice : public ov::pass::MatcherPass { */ class ov::pass::EliminateStridedSliceByShape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateStridedSliceByShape", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateStridedSliceByShape"); EliminateStridedSliceByShape(); }; @@ -222,6 +222,6 @@ class ov::pass::EliminateStridedSliceByShape : public ov::pass::MatcherPass { */ class ov::pass::PrepareShapeOpsForEliminationAroundBE : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PrepareShapeOpsForEliminationAroundBE", "0"); + OPENVINO_MATCHER_PASS_RTTI("PrepareShapeOpsForEliminationAroundBE"); PrepareShapeOpsForEliminationAroundBE(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/normalize_l2_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/normalize_l2_fusion.hpp index 39355ec6af5ec4..01ac902140f01b 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/normalize_l2_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/normalize_l2_fusion.hpp @@ -31,6 +31,6 @@ class TRANSFORMATIONS_API NormalizeL2Fusion; */ class ov::pass::NormalizeL2Fusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NormalizeL2Fusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("NormalizeL2Fusion"); NormalizeL2Fusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp b/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp index cb642795254791..961c5e893e8119 100644 --- a/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp @@ -71,7 +71,7 @@ class ov::pass::GroupedSliceToVSplitOptimization : public ov::pass::ModelPass { */ class ov::pass::SliceSequenceToSingleSlice : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SliceSequenceToSingleSlice", "0"); + OPENVINO_MATCHER_PASS_RTTI("SliceSequenceToSingleSlice"); SliceSequenceToSingleSlice(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp index 799fe0a8a4ea81..018c098d221d3e 100644 --- a/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp @@ -30,7 +30,7 @@ class TRANSFORMATIONS_API PadFusionGroupConvolutionBackpropData; */ class ov::pass::PadFusionAvgPool : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PadFusionAvgPool", "0"); + OPENVINO_MATCHER_PASS_RTTI("PadFusionAvgPool"); PadFusionAvgPool(); }; @@ -43,7 +43,7 @@ class 
ov::pass::PadFusionAvgPool : public ov::pass::MatcherPass { */ class ov::pass::PadFusionConvolution : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PadFusionConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("PadFusionConvolution"); PadFusionConvolution(); }; @@ -57,7 +57,7 @@ class ov::pass::PadFusionConvolution : public ov::pass::MatcherPass { */ class ov::pass::PadFusionConvolutionBackpropData : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PadFusionConvolutionBackpropData", "0"); + OPENVINO_MATCHER_PASS_RTTI("PadFusionConvolutionBackpropData"); PadFusionConvolutionBackpropData(); }; @@ -70,7 +70,7 @@ class ov::pass::PadFusionConvolutionBackpropData : public ov::pass::MatcherPass */ class ov::pass::PadFusionGroupConvolution : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PadFusionGroupConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("PadFusionGroupConvolution"); PadFusionGroupConvolution(); }; @@ -84,7 +84,7 @@ class ov::pass::PadFusionGroupConvolution : public ov::pass::MatcherPass { */ class ov::pass::PadFusionGroupConvolutionBackpropData : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PadFusionGroupConvolutionBackpropData", "0"); + OPENVINO_MATCHER_PASS_RTTI("PadFusionGroupConvolutionBackpropData"); PadFusionGroupConvolutionBackpropData(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp index b2ba7ca9447450..bb1c3206f1ea3d 100644 --- a/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp @@ -41,7 +41,7 @@ class TRANSFORMATIONS_API PReluFusionNegReluMulAdd; */ class ov::pass::PReluFusionNegativeAdd : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PReluFusionNegativeAdd", "0"); + OPENVINO_MATCHER_PASS_RTTI("PReluFusionNegativeAdd"); 
PReluFusionNegativeAdd(); }; @@ -60,7 +60,7 @@ class ov::pass::PReluFusionNegativeAdd : public ov::pass::MatcherPass { */ class ov::pass::PReluFusionNegativeSub : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PReluFusionNegativeSub", "0"); + OPENVINO_MATCHER_PASS_RTTI("PReluFusionNegativeSub"); PReluFusionNegativeSub(); }; @@ -79,7 +79,7 @@ class ov::pass::PReluFusionNegativeSub : public ov::pass::MatcherPass { */ class ov::pass::PReluFusionMultiplyAdd : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PReluFusionMultiplyAdd", "0"); + OPENVINO_MATCHER_PASS_RTTI("PReluFusionMultiplyAdd"); PReluFusionMultiplyAdd(); }; @@ -98,7 +98,7 @@ class ov::pass::PReluFusionMultiplyAdd : public ov::pass::MatcherPass { */ class ov::pass::PReluFusionMultiplySub : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PReluFusionMultiplySub", "0"); + OPENVINO_MATCHER_PASS_RTTI("PReluFusionMultiplySub"); PReluFusionMultiplySub(); }; @@ -119,7 +119,7 @@ class ov::pass::PReluFusionMultiplySub : public ov::pass::MatcherPass { */ class ov::pass::PReluFusionAbsSubMulMulAdd : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PReluFusionAbsSubMulMulAdd", "0"); + OPENVINO_MATCHER_PASS_RTTI("PReluFusionAbsSubMulMulAdd"); PReluFusionAbsSubMulMulAdd(); }; @@ -138,7 +138,7 @@ class ov::pass::PReluFusionAbsSubMulMulAdd : public ov::pass::MatcherPass { */ class ov::pass::PReluFusionNegReluMulAdd : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PReluFusionNegReluMulAdd", "0"); + OPENVINO_MATCHER_PASS_RTTI("PReluFusionNegReluMulAdd"); PReluFusionNegReluMulAdd(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp b/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp index c31902cd478bfa..2824ba2a8b374d 100644 --- a/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp +++ 
b/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp @@ -25,7 +25,7 @@ class TRANSFORMATIONS_API PullReshapeThroughReduce; */ class ov::pass::PullUnsqueezeThroughReduce : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PullUnsqueezeThroughReduce", "0"); + OPENVINO_MATCHER_PASS_RTTI("PullUnsqueezeThroughReduce"); PullUnsqueezeThroughReduce(); }; @@ -37,7 +37,7 @@ class ov::pass::PullUnsqueezeThroughReduce : public ov::pass::MatcherPass { */ class ov::pass::PullReshapeThroughReduce : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PullReshapeThroughReduce", "0"); + OPENVINO_MATCHER_PASS_RTTI("PullReshapeThroughReduce"); PullReshapeThroughReduce(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/pull_transpose_through_fq.hpp b/src/common/transformations/include/transformations/common_optimizations/pull_transpose_through_fq.hpp index c4aa71724a07a3..5e92d0eab4247c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/pull_transpose_through_fq.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/pull_transpose_through_fq.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API PullTransposeThroughFQUp; class ov::pass::PullTransposeThroughFQUp : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PullTransposeThroughFQUp", "0"); + OPENVINO_MATCHER_PASS_RTTI("PullTransposeThroughFQUp"); PullTransposeThroughFQUp(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/random_uniform_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/random_uniform_fusion.hpp index 198666e9a02673..1f4c76ee810612 100644 --- a/src/common/transformations/include/transformations/common_optimizations/random_uniform_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/random_uniform_fusion.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API 
RandomUniformFusion; */ class ov::pass::RandomUniformFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RandomUniformFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("RandomUniformFusion"); RandomUniformFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/reduce_merge.hpp b/src/common/transformations/include/transformations/common_optimizations/reduce_merge.hpp index 20daf9173b87c8..a24ce14c43aaeb 100644 --- a/src/common/transformations/include/transformations/common_optimizations/reduce_merge.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/reduce_merge.hpp @@ -64,6 +64,6 @@ class TRANSFORMATIONS_API ReduceMerge; */ class ov::pass::ReduceMerge : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReduceMerge", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReduceMerge"); ReduceMerge(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/reduce_reshape_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/reduce_reshape_fusion.hpp index 2827be2cdb5738..b7c5978e1458e1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/reduce_reshape_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/reduce_reshape_fusion.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ReduceReshapeFusion; */ class ov::pass::ReduceReshapeFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReduceReshapeFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReduceReshapeFusion"); ReduceReshapeFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/relu_fake_quantize_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/relu_fake_quantize_fusion.hpp index 8d2895b378c774..04ffa7ddb4b8b0 100644 --- a/src/common/transformations/include/transformations/common_optimizations/relu_fake_quantize_fusion.hpp +++ 
b/src/common/transformations/include/transformations/common_optimizations/relu_fake_quantize_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ReluFakeQuantizeFusion; class ov::pass::ReluFakeQuantizeFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReluFakeQuantizeFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReluFakeQuantizeFusion"); ReluFakeQuantizeFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp b/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp index 5c746cd4dde987..881fd9cb23e9c3 100644 --- a/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp @@ -24,7 +24,7 @@ class OPENVINO_API DisableRemoveConcatZeroDimInput; class RemoveConcatZeroDimInput : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RemoveConcatZeroDimInput", "0"); + OPENVINO_MATCHER_PASS_RTTI("RemoveConcatZeroDimInput"); RemoveConcatZeroDimInput(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp b/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp index 40dfb824d2ece2..02ebb6eec3a48d 100644 --- a/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp @@ -27,6 +27,6 @@ class ov::pass::FuseFilteringBoxesBySize : public ov::pass::GraphRewrite { class ov::pass::RemoveFilteringBoxesBySize : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RemoveFilteringBoxesBySize", "0"); + OPENVINO_MATCHER_PASS_RTTI("RemoveFilteringBoxesBySize"); RemoveFilteringBoxesBySize(); }; diff --git 
a/src/common/transformations/include/transformations/common_optimizations/reshape_prelu.hpp b/src/common/transformations/include/transformations/common_optimizations/reshape_prelu.hpp index 9ced2036d9906b..6c5a629a2fa840 100644 --- a/src/common/transformations/include/transformations/common_optimizations/reshape_prelu.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/reshape_prelu.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ReshapePRelu; class ov::pass::ReshapePRelu : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapePRelu", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapePRelu"); ReshapePRelu(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/reshape_sequence_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/reshape_sequence_fusion.hpp index 5fa22e7feb0fe6..5aaed4a6be32ad 100644 --- a/src/common/transformations/include/transformations/common_optimizations/reshape_sequence_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/reshape_sequence_fusion.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API ReshapeSequenceFusion; class ov::pass::ReshapeSequenceFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapeSequenceFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapeSequenceFusion"); ReshapeSequenceFusion(bool use_shape_for_elimination = true); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/rms_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/rms_fusion.hpp index d8cb02a596ab6c..0a63b3cb6e9a7c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/rms_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/rms_fusion.hpp @@ -29,7 +29,7 @@ namespace pass { class RMSFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RMSFusion", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("RMSFusion"); RMSFusion(bool force_tail_convert = true); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/select_with_one_value_condition.hpp b/src/common/transformations/include/transformations/common_optimizations/select_with_one_value_condition.hpp index 5278e17d07ff64..1146565efa1b48 100644 --- a/src/common/transformations/include/transformations/common_optimizations/select_with_one_value_condition.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/select_with_one_value_condition.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API SelectWithOneValueCondition; class ov::pass::SelectWithOneValueCondition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SelectWithOneValueCondition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SelectWithOneValueCondition"); SelectWithOneValueCondition(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/sequence_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/sequence_fusion.hpp index ecba41990e28dd..a4f432b8bd584f 100644 --- a/src/common/transformations/include/transformations/common_optimizations/sequence_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/sequence_fusion.hpp @@ -30,6 +30,6 @@ class TRANSFORMATIONS_API SequenceFusion; class ov::pass::SequenceFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SequenceFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SequenceFusion"); SequenceFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/shuffle_channels_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/shuffle_channels_fusion.hpp index 2304fe637b4551..b88cbdb64731a0 100644 --- a/src/common/transformations/include/transformations/common_optimizations/shuffle_channels_fusion.hpp +++ 
b/src/common/transformations/include/transformations/common_optimizations/shuffle_channels_fusion.hpp @@ -34,6 +34,6 @@ class TRANSFORMATIONS_API ShuffleChannelsFusion; class ov::pass::ShuffleChannelsFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ShuffleChannelsFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("ShuffleChannelsFusion"); ShuffleChannelsFusion(const bool reshape_constants_check); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp index 79e0ffd789bf7c..66198e60f3a564 100644 --- a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp @@ -31,7 +31,7 @@ class TRANSFORMATIONS_API AbsSinking; */ class ov::pass::GroupedGatherElimination : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GroupedGatherElimination", "0"); + OPENVINO_MATCHER_PASS_RTTI("GroupedGatherElimination"); GroupedGatherElimination(); }; @@ -55,7 +55,7 @@ class ov::pass::SimplifyShapeOfSubGraph : public ov::pass::ModelPass { */ class ov::pass::GatherNopElimination : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GatherNopElimination", "0"); + OPENVINO_MATCHER_PASS_RTTI("GatherNopElimination"); GatherNopElimination(); }; @@ -67,7 +67,7 @@ class ov::pass::GatherNopElimination : public ov::pass::MatcherPass { */ class ov::pass::SimplifyGatherShapeOf : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SimplifyGatherShapeOf", "0"); + OPENVINO_MATCHER_PASS_RTTI("SimplifyGatherShapeOf"); SimplifyGatherShapeOf(); }; @@ -78,7 +78,7 @@ class ov::pass::SimplifyGatherShapeOf : public ov::pass::MatcherPass { */ class ov::pass::SimplifySecondInputOfReshape : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("SimplifySecondInputOfReshape", "0"); + OPENVINO_MATCHER_PASS_RTTI("SimplifySecondInputOfReshape"); SimplifySecondInputOfReshape(); }; @@ -90,6 +90,6 @@ class ov::pass::SimplifySecondInputOfReshape : public ov::pass::MatcherPass { */ class ov::pass::AbsSinking : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AbsSinking", "0"); + OPENVINO_MATCHER_PASS_RTTI("AbsSinking"); AbsSinking(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/skip_gather_before_transpose_and_reshape.hpp b/src/common/transformations/include/transformations/common_optimizations/skip_gather_before_transpose_and_reshape.hpp index a93fdb8f4f20fe..3e937ba217d66c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/skip_gather_before_transpose_and_reshape.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/skip_gather_before_transpose_and_reshape.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API SkipGatherBeforeTransposeAndReshape; */ class ov::pass::SkipGatherBeforeTransposeAndReshape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SkipGatherBeforeTransposeAndReshape", "0"); + OPENVINO_MATCHER_PASS_RTTI("SkipGatherBeforeTransposeAndReshape"); SkipGatherBeforeTransposeAndReshape(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/softmax_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/softmax_fusion.hpp index f56a8bd1b574f7..07524e3799cf64 100644 --- a/src/common/transformations/include/transformations/common_optimizations/softmax_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/softmax_fusion.hpp @@ -103,6 +103,6 @@ class TRANSFORMATIONS_API SoftmaxFusion; class ov::pass::SoftmaxFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftmaxFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftmaxFusion"); SoftmaxFusion(); }; diff --git 
a/src/common/transformations/include/transformations/common_optimizations/softplus_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/softplus_fusion.hpp index 44d34dcdcc4c90..3dbadc5c2b6046 100644 --- a/src/common/transformations/include/transformations/common_optimizations/softplus_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/softplus_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API SoftPlusFusion; */ class ov::pass::SoftPlusFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftPlusFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftPlusFusion"); SoftPlusFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/softplus_to_mish_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/softplus_to_mish_fusion.hpp index 6a6d247e4ea351..cbc0194ad20c62 100644 --- a/src/common/transformations/include/transformations/common_optimizations/softplus_to_mish_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/softplus_to_mish_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API SoftPlusToMishFusion; */ class ov::pass::SoftPlusToMishFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftPlusToMishFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftPlusToMishFusion"); SoftPlusToMishFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/space_to_batch_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/space_to_batch_fusion.hpp index 43cf4654628cf5..aa95f690512b42 100644 --- a/src/common/transformations/include/transformations/common_optimizations/space_to_batch_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/space_to_batch_fusion.hpp @@ -32,6 +32,6 @@ class TRANSFORMATIONS_API SpaceToBatchFusion; class ov::pass::SpaceToBatchFusion : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("SpaceToBatchFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SpaceToBatchFusion"); SpaceToBatchFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/split_concat_pair_to_interpolate_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/split_concat_pair_to_interpolate_fusion.hpp index e9ecba4fe6e961..72887dd32d9008 100644 --- a/src/common/transformations/include/transformations/common_optimizations/split_concat_pair_to_interpolate_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/split_concat_pair_to_interpolate_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API SplitConcatPairToInterpolateFusion; */ class ov::pass::SplitConcatPairToInterpolateFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SplitConcatPairToInterpolateFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SplitConcatPairToInterpolateFusion"); SplitConcatPairToInterpolateFusion(bool use_shape_for_elimination = true); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/split_squeeze_concat_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/split_squeeze_concat_fusion.hpp index 24073c6a61e2dc..3cd3c9429be0f1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/split_squeeze_concat_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/split_squeeze_concat_fusion.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API SplitSqueezeConcatFusion; */ class ov::pass::SplitSqueezeConcatFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SplitSqueezeConcatFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SplitSqueezeConcatFusion"); SplitSqueezeConcatFusion(bool use_shapes); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp 
b/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp index 57f5036fe5faa7..acdd30580b1a23 100644 --- a/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp @@ -28,7 +28,7 @@ class TRANSFORMATIONS_API StridesOptimization; */ class ov::pass::ConvStridesPropagation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvStridesPropagation", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvStridesPropagation"); ConvStridesPropagation(); }; @@ -40,7 +40,7 @@ class ov::pass::ConvStridesPropagation : public ov::pass::MatcherPass { */ class ov::pass::SupportedNodesStridesPropagation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SupportedNodesStridesPropagation", "0"); + OPENVINO_MATCHER_PASS_RTTI("SupportedNodesStridesPropagation"); SupportedNodesStridesPropagation(); }; @@ -51,7 +51,7 @@ class ov::pass::SupportedNodesStridesPropagation : public ov::pass::MatcherPass */ class ov::pass::UnsupportedNodesStridesPropagation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UnsupportedNodesStridesPropagation", "0"); + OPENVINO_MATCHER_PASS_RTTI("UnsupportedNodesStridesPropagation"); UnsupportedNodesStridesPropagation(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/subtract_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/subtract_fusion.hpp index 69e4095b6becd7..1963f226b830be 100644 --- a/src/common/transformations/include/transformations/common_optimizations/subtract_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/subtract_fusion.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API SubtractFusion; */ class ov::pass::SubtractFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SubtractFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("SubtractFusion"); 
SubtractFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp index a630e1998e84ca..2bee12da7d9c41 100644 --- a/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp @@ -28,7 +28,7 @@ class TRANSFORMATIONS_API SwishFusionWithoutBeta; */ class ov::pass::SwishFusionWithSigmoid : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SwishFusionWithSigmoid", "0"); + OPENVINO_MATCHER_PASS_RTTI("SwishFusionWithSigmoid"); SwishFusionWithSigmoid(); }; @@ -38,7 +38,7 @@ class ov::pass::SwishFusionWithSigmoid : public ov::pass::MatcherPass { */ class ov::pass::SwishFusionWithSigmoidWithBeta : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SwishFusionWithSigmoidWithBeta", "0"); + OPENVINO_MATCHER_PASS_RTTI("SwishFusionWithSigmoidWithBeta"); SwishFusionWithSigmoidWithBeta(); }; @@ -48,7 +48,7 @@ class ov::pass::SwishFusionWithSigmoidWithBeta : public ov::pass::MatcherPass { */ class ov::pass::SwishFusionWithBeta : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SwishFusionWithBeta", "0"); + OPENVINO_MATCHER_PASS_RTTI("SwishFusionWithBeta"); SwishFusionWithBeta(); }; @@ -58,7 +58,7 @@ class ov::pass::SwishFusionWithBeta : public ov::pass::MatcherPass { */ class ov::pass::SwishFusionWithoutBeta : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SwishFusionWithoutBeta", "0"); + OPENVINO_MATCHER_PASS_RTTI("SwishFusionWithoutBeta"); SwishFusionWithoutBeta(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/transpose_reshape_elimination_for_matmul.hpp b/src/common/transformations/include/transformations/common_optimizations/transpose_reshape_elimination_for_matmul.hpp index c1299872c4b3a5..b250ce1d3c3866 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/transpose_reshape_elimination_for_matmul.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/transpose_reshape_elimination_for_matmul.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API TransposeReshapeEliminationForMatmul; */ class ov::pass::TransposeReshapeEliminationForMatmul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeReshapeEliminationForMatmul", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeReshapeEliminationForMatmul"); TransposeReshapeEliminationForMatmul(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp b/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp index 8189535fe260ae..31de821878e971 100644 --- a/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp @@ -29,7 +29,7 @@ class TRANSFORMATIONS_API TransposeFuse; */ class ov::pass::TransposeReduction : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeReduction", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeReduction"); TransposeReduction(); }; @@ -40,7 +40,7 @@ class ov::pass::TransposeReduction : public ov::pass::MatcherPass { */ class ov::pass::TransposeFQReduction : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeFQReduction", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeFQReduction"); TransposeFQReduction(); }; @@ -50,7 +50,7 @@ class ov::pass::TransposeFQReduction : public ov::pass::MatcherPass { */ class ov::pass::TransposeConvert : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeConvert", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeConvert"); TransposeConvert(); }; @@ -60,7 +60,7 @@ class ov::pass::TransposeConvert : public ov::pass::MatcherPass { */ class ov::pass::TransposeEltwise : 
public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeEltwise", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeEltwise"); TransposeEltwise(); }; @@ -71,7 +71,7 @@ class ov::pass::TransposeEltwise : public ov::pass::MatcherPass { */ class ov::pass::TransposeFuse : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeFuse", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeFuse"); TransposeFuse(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/transpose_to_reshape.hpp b/src/common/transformations/include/transformations/common_optimizations/transpose_to_reshape.hpp index aba6154bb0b58c..3a99d47858ec6f 100644 --- a/src/common/transformations/include/transformations/common_optimizations/transpose_to_reshape.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/transpose_to_reshape.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API TransposeToReshape; */ class ov::pass::TransposeToReshape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeToReshape", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeToReshape"); TransposeToReshape(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp b/src/common/transformations/include/transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp index aa34c7859d068a..1b77d8f519d391 100644 --- a/src/common/transformations/include/transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp @@ -28,6 +28,6 @@ class TRANSFORMATIONS_API WeightsDequantizeToFakeQuantize; */ class ov::pass::WeightsDequantizeToFakeQuantize : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("WeightsDequantizeToFakeQuantize", "0"); + OPENVINO_MATCHER_PASS_RTTI("WeightsDequantizeToFakeQuantize"); WeightsDequantizeToFakeQuantize(); }; diff --git 
a/src/common/transformations/include/transformations/common_optimizations/wrap_interpolate_into_transposes.hpp b/src/common/transformations/include/transformations/common_optimizations/wrap_interpolate_into_transposes.hpp index 7b35498d5dde64..e0ef8b68bdaf04 100644 --- a/src/common/transformations/include/transformations/common_optimizations/wrap_interpolate_into_transposes.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/wrap_interpolate_into_transposes.hpp @@ -35,6 +35,6 @@ class TRANSFORMATIONS_API WrapInterpolateIntoTransposes; */ class ov::pass::WrapInterpolateIntoTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("WrapInterpolateIntoTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("WrapInterpolateIntoTransposes"); WrapInterpolateIntoTransposes(); }; diff --git a/src/common/transformations/include/transformations/flush_fp32_subnormals_to_zero.hpp b/src/common/transformations/include/transformations/flush_fp32_subnormals_to_zero.hpp index 71c71b0614f29d..b9522d4a4273dd 100644 --- a/src/common/transformations/include/transformations/flush_fp32_subnormals_to_zero.hpp +++ b/src/common/transformations/include/transformations/flush_fp32_subnormals_to_zero.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API FlushFP32SubnormalsToZero; */ class ov::pass::FlushFP32SubnormalsToZero : public MatcherPass { public: - OPENVINO_RTTI("FlushFP32SubnormalsToZero", "0"); + OPENVINO_MATCHER_PASS_RTTI("FlushFP32SubnormalsToZero"); FlushFP32SubnormalsToZero(); }; diff --git a/src/common/transformations/include/transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp b/src/common/transformations/include/transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp index 4f003dbc09e671..99852d0dc0df3f 100644 --- a/src/common/transformations/include/transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp +++ 
b/src/common/transformations/include/transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API MarkCompressedFloatConstants; */ class ov::pass::EnableDecompressionConvertConstantFolding : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EnableDecompressionConvertConstantFolding", "0"); + OPENVINO_MATCHER_PASS_RTTI("EnableDecompressionConvertConstantFolding"); EnableDecompressionConvertConstantFolding(); }; @@ -36,7 +36,7 @@ class ov::pass::EnableDecompressionConvertConstantFolding : public ov::pass::Mat */ class ov::pass::DisableDecompressionConvertConstantFolding : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DisableDecompressionConvertConstantFolding", "0"); + OPENVINO_MATCHER_PASS_RTTI("DisableDecompressionConvertConstantFolding"); DisableDecompressionConvertConstantFolding(); }; @@ -46,7 +46,7 @@ class ov::pass::DisableDecompressionConvertConstantFolding : public ov::pass::Ma */ class ov::pass::KeepConstAndDecompression : public MatcherPass { public: - OPENVINO_RTTI("KeepConstAndDecompression", "0"); + OPENVINO_MATCHER_PASS_RTTI("KeepConstAndDecompression"); KeepConstAndDecompression(); }; @@ -56,7 +56,7 @@ class ov::pass::KeepConstAndDecompression : public MatcherPass { */ class ov::pass::KeepConstantsPrecisionAndAddConverts : public MatcherPass { public: - OPENVINO_RTTI("KeepConstantsPrecisionAndAddConverts", "0"); + OPENVINO_MATCHER_PASS_RTTI("KeepConstantsPrecisionAndAddConverts"); KeepConstantsPrecisionAndAddConverts(); }; @@ -69,6 +69,6 @@ class ov::pass::KeepConstantsPrecisionAndAddConverts : public MatcherPass { */ class ov::pass::MarkCompressedFloatConstants : public MatcherPass { public: - OPENVINO_RTTI("KeepFWPrecisionFor16BitFloatConstants", "0"); + OPENVINO_MATCHER_PASS_RTTI("MarkCompressedFloatConstants"); MarkCompressedFloatConstants(); }; diff --git a/src/common/transformations/include/transformations/fp16_compression/mark_floatpoint_range.hpp 
b/src/common/transformations/include/transformations/fp16_compression/mark_floatpoint_range.hpp index a61bd270c584ec..c1f948299c4321 100644 --- a/src/common/transformations/include/transformations/fp16_compression/mark_floatpoint_range.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/mark_floatpoint_range.hpp @@ -18,7 +18,7 @@ namespace pass { */ class TRANSFORMATIONS_API MarkFloatingPointRange : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MarkFloatingPointRange", "0"); + OPENVINO_MATCHER_PASS_RTTI("MarkFloatingPointRange"); MarkFloatingPointRange(); }; @@ -27,4 +27,4 @@ OPENVINO_API bool is_range_path(const std::shared_ptr& node); OPENVINO_API void erase_range_path(const std::shared_ptr& node); } // namespace pass -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/transformations/include/transformations/low_precision/mark_dequantization_subgraph.hpp b/src/common/transformations/include/transformations/low_precision/mark_dequantization_subgraph.hpp index 6cbd8d990ac73e..22f1eb753a28ad 100644 --- a/src/common/transformations/include/transformations/low_precision/mark_dequantization_subgraph.hpp +++ b/src/common/transformations/include/transformations/low_precision/mark_dequantization_subgraph.hpp @@ -40,7 +40,7 @@ namespace pass { */ class TRANSFORMATIONS_API MarkDequantization : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MarkDequantization", "0"); + OPENVINO_MATCHER_PASS_RTTI("MarkDequantization"); explicit MarkDequantization(const element::TypeVector& precisions, bool fold_subtract_const = false, bool fold_multiply_const = true); @@ -70,7 +70,7 @@ class TRANSFORMATIONS_API MarkDequantization : public ov::pass::MatcherPass { */ class TRANSFORMATIONS_API KeepConstsPrecision : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("KeepConstsPrecision", "0"); + OPENVINO_MATCHER_PASS_RTTI("KeepConstsPrecision"); explicit KeepConstsPrecision(const element::TypeVector& 
precisions, bool fold_subtract_const = false, bool fold_multiply_const = true); diff --git a/src/common/transformations/include/transformations/sdpa_to_paged_attention/position_ids_replacer.hpp b/src/common/transformations/include/transformations/sdpa_to_paged_attention/position_ids_replacer.hpp index 5ee79ec787a9bc..50c0ecd20e76af 100644 --- a/src/common/transformations/include/transformations/sdpa_to_paged_attention/position_ids_replacer.hpp +++ b/src/common/transformations/include/transformations/sdpa_to_paged_attention/position_ids_replacer.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API PositionIDsReplacer; class ov::pass::PositionIDsReplacer : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PositionIDsReplacer", "0"); + OPENVINO_MATCHER_PASS_RTTI("PositionIDsReplacer"); explicit PositionIDsReplacer(const Output& position_ids); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/sdpa_to_paged_attention/prev_sequence_length_pattern.hpp b/src/common/transformations/include/transformations/sdpa_to_paged_attention/prev_sequence_length_pattern.hpp index fd4e22c69262ae..f5497207eb4e17 100644 --- a/src/common/transformations/include/transformations/sdpa_to_paged_attention/prev_sequence_length_pattern.hpp +++ b/src/common/transformations/include/transformations/sdpa_to_paged_attention/prev_sequence_length_pattern.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API PrevSequenceLengthPattern; class ov::pass::PrevSequenceLengthPattern : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PrevSequenceLengthPattern", "0"); + OPENVINO_MATCHER_PASS_RTTI("PrevSequenceLengthPattern"); explicit PrevSequenceLengthPattern(std::shared_ptr prev_max_seq_len, std::shared_ptr batch_dim); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/sdpa_to_paged_attention/state_management_pattern.hpp 
b/src/common/transformations/include/transformations/sdpa_to_paged_attention/state_management_pattern.hpp index feab06ccc0cd5d..79b4f444cfa791 100644 --- a/src/common/transformations/include/transformations/sdpa_to_paged_attention/state_management_pattern.hpp +++ b/src/common/transformations/include/transformations/sdpa_to_paged_attention/state_management_pattern.hpp @@ -17,7 +17,7 @@ class TRANSFORMATIONS_API StateManagementPattern; class ov::pass::StateManagementPattern : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("StateManagementPattern", "0"); + OPENVINO_MATCHER_PASS_RTTI("StateManagementPattern"); StateManagementPattern(ParameterVector& kv_parameters, ParameterVector& model_remaining_params, const std::shared_ptr& sliding_window, @@ -28,4 +28,4 @@ class ov::pass::StateManagementPattern : public ov::pass::MatcherPass { ResultVector& score_results, bool use_block_indices, bool use_score_outputs); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/sdpa_to_paged_attention/total_sequence_length_pattern.hpp b/src/common/transformations/include/transformations/sdpa_to_paged_attention/total_sequence_length_pattern.hpp index c6b319a389ecaa..b5ecb96fa95198 100644 --- a/src/common/transformations/include/transformations/sdpa_to_paged_attention/total_sequence_length_pattern.hpp +++ b/src/common/transformations/include/transformations/sdpa_to_paged_attention/total_sequence_length_pattern.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API TotalSequenceLengthPattern; class ov::pass::TotalSequenceLengthPattern : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TotalSequenceLengthPattern", "0"); + OPENVINO_MATCHER_PASS_RTTI("TotalSequenceLengthPattern"); explicit TotalSequenceLengthPattern(const std::shared_ptr& max_context_len); }; diff --git a/src/common/transformations/include/transformations/smart_reshape/broadcast_const_range_replacement.hpp 
b/src/common/transformations/include/transformations/smart_reshape/broadcast_const_range_replacement.hpp index 04f7de9a336414..12cfe17a115592 100644 --- a/src/common/transformations/include/transformations/smart_reshape/broadcast_const_range_replacement.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/broadcast_const_range_replacement.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API BroadcastConstRangeReplacement; class ov::pass::BroadcastConstRangeReplacement : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BroadcastConstRangeReplacement", "0"); + OPENVINO_MATCHER_PASS_RTTI("BroadcastConstRangeReplacement"); BroadcastConstRangeReplacement(); }; diff --git a/src/common/transformations/include/transformations/smart_reshape/matmul_sr.hpp b/src/common/transformations/include/transformations/smart_reshape/matmul_sr.hpp index 4e21e767d1bce6..cec7ce6bcd074e 100644 --- a/src/common/transformations/include/transformations/smart_reshape/matmul_sr.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/matmul_sr.hpp @@ -30,16 +30,16 @@ class TRANSFORMATIONS_API TransposeMatMul; class ov::pass::ReshapeAMatMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapeAMatMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapeAMatMul"); ReshapeAMatMul(); }; class ov::pass::ReshapeBMatMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapeBMatMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapeBMatMul"); ReshapeBMatMul(); }; class ov::pass::TransposeMatMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeMatMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeMatMul"); TransposeMatMul(); }; diff --git a/src/common/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp b/src/common/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp index d2eaf125e0dd5c..c8d756252509e1 100644 --- 
a/src/common/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp @@ -36,12 +36,12 @@ class TRANSFORMATIONS_API Proposal4Scales; class ov::pass::Proposal1Scales : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("Proposal1Scales", "0"); + OPENVINO_MATCHER_PASS_RTTI("Proposal1Scales"); Proposal1Scales(); }; class ov::pass::Proposal4Scales : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("Proposal4Scales", "0"); + OPENVINO_MATCHER_PASS_RTTI("Proposal4Scales"); Proposal4Scales(); }; diff --git a/src/common/transformations/include/transformations/smart_reshape/reshape_sinking.hpp b/src/common/transformations/include/transformations/smart_reshape/reshape_sinking.hpp index aeaf46ccde1c2b..dd64980a0d155a 100644 --- a/src/common/transformations/include/transformations/smart_reshape/reshape_sinking.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/reshape_sinking.hpp @@ -28,6 +28,6 @@ class TRANSFORMATIONS_API ReshapeSinkingMatMul; class ov::pass::ReshapeSinkingMatMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapeSinkingMatMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapeSinkingMatMul"); ReshapeSinkingMatMul(); }; diff --git a/src/common/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp b/src/common/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp index a973038bb30900..cab320f543f382 100644 --- a/src/common/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ReshapeTo1D; class ov::pass::ReshapeTo1D : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapeTo1D", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapeTo1D"); ReshapeTo1D(); }; diff --git 
a/src/common/transformations/include/transformations/smart_reshape/shape_of_const_folding.hpp b/src/common/transformations/include/transformations/smart_reshape/shape_of_const_folding.hpp index d2bc029cce4682..fdb620bf588e72 100644 --- a/src/common/transformations/include/transformations/smart_reshape/shape_of_const_folding.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/shape_of_const_folding.hpp @@ -16,7 +16,7 @@ namespace pass { */ class TRANSFORMATIONS_API ShapeOfConstFolding : public MatcherPass { public: - OPENVINO_RTTI("ShapeOfConstFolding", "0"); + OPENVINO_MATCHER_PASS_RTTI("ShapeOfConstFolding"); ShapeOfConstFolding(); }; diff --git a/src/common/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp b/src/common/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp index f34b6d25a27e49..cafac7e77857fb 100644 --- a/src/common/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp @@ -27,7 +27,7 @@ class TRANSFORMATIONS_API SqueezeStridedSlice; class ov::pass::StridedSliceSqueeze : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("StridedSliceSqueeze", "0"); + OPENVINO_MATCHER_PASS_RTTI("StridedSliceSqueeze"); StridedSliceSqueeze(); }; @@ -39,6 +39,6 @@ class ov::pass::StridedSliceSqueeze : public ov::pass::MatcherPass { class ov::pass::SqueezeStridedSlice : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SqueezeStridedSlice", "0"); + OPENVINO_MATCHER_PASS_RTTI("SqueezeStridedSlice"); SqueezeStridedSlice(); }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp b/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp index c7cb03afd5ade4..20a43475233050 100644 --- 
a/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ChainedMaximumOptimization; */ class ov::pass::ChainedMaximumOptimization : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ChainedMaximumOptimization", "0"); + OPENVINO_MATCHER_PASS_RTTI("ChainedMaximumOptimization"); ChainedMaximumOptimization(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/dereshape_matmul.hpp b/src/common/transformations/include/transformations/symbolic_transformations/dereshape_matmul.hpp index fa1c844faa7129..b1586741a05833 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/dereshape_matmul.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/dereshape_matmul.hpp @@ -62,7 +62,7 @@ class TRANSFORMATIONS_API DeReshapeFullyConnected; */ class ov::pass::DeReshapeMatMul : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DeReshapeMatMul", "0"); + OPENVINO_MATCHER_PASS_RTTI("DeReshapeMatMul"); DeReshapeMatMul(); }; @@ -87,6 +87,6 @@ class ov::pass::DeReshapeMatMul : public ov::pass::MatcherPass { */ class ov::pass::DeReshapeFullyConnected : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DeReshapeFullyConnected", "0"); + OPENVINO_MATCHER_PASS_RTTI("DeReshapeFullyConnected"); DeReshapeFullyConnected(); }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp b/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp index 5a12b5735ce428..524fef52846e5c 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp +++ 
b/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API NopBroadcast; */ class ov::pass::NopBroadcast : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NopBroadcast", "0"); + OPENVINO_MATCHER_PASS_RTTI("NopBroadcast"); NopBroadcast(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/reshape_optimizations.hpp b/src/common/transformations/include/transformations/symbolic_transformations/reshape_optimizations.hpp index 5d84d83bad2de5..f23cfd580cbccf 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/reshape_optimizations.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/reshape_optimizations.hpp @@ -52,6 +52,6 @@ class TRANSFORMATIONS_API ReshapeOptimizations; */ class ov::pass::ReshapeOptimizations : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReshapeOptimizations", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReshapeOptimizations"); ReshapeOptimizations(); }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp index c6a99c90122544..6197ad4c246f6a 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp @@ -58,6 +58,6 @@ class ov::pass::SymbolicPropagation : public ov::pass::ModelPass { */ class ov::pass::LabelResolvingThroughSelect : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LabelResolvingThroughSelect", "0"); + OPENVINO_MATCHER_PASS_RTTI("LabelResolvingThroughSelect"); LabelResolvingThroughSelect(); }; diff --git 
a/src/common/transformations/include/transformations/transpose_sinking/ts_base.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_base.hpp index 013799e854df8f..d16bf401576b96 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_base.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_base.hpp @@ -27,7 +27,7 @@ class TRANSFORMATIONS_API TSForwardBase; */ class ov::pass::transpose_sinking::TSForwardBase : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSForwardBase", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSForwardBase"); TSForwardBase() = default; template diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp index 9534380d51253e..b8eca55dcb9685 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSBinaryForward : public ov::pass::transpose_ */ class ov::pass::transpose_sinking::TSBinaryBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSBinaryBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSBinaryBackward"); TSBinaryBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp index cc0ccc2c194dbf..5b6477da94a80d 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSConcatForward : public ov::pass::transpose_ */ class ov::pass::transpose_sinking::TSConcatBackward : public ov::pass::MatcherPass 
{ public: - OPENVINO_RTTI("ov::pass::TSConcatBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSConcatBackward"); TSConcatBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp index 185bea0105ec4d..741c56d5be0de7 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp @@ -35,6 +35,6 @@ class ov::pass::transpose_sinking::TSCumSumForward : public ov::pass::transpose_ */ class ov::pass::transpose_sinking::TSCumSumBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSBinaryBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSCumSumBackward"); TSCumSumBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp index 9775e57d61146b..e1a4f34a109eec 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp @@ -39,6 +39,6 @@ class ov::pass::transpose_sinking::TSDataMovementForward : public ov::pass::tran */ class ov::pass::transpose_sinking::TSDataMovementBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSDataMovementBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSDataMovementBackward"); TSDataMovementBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_fuse.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_fuse.hpp index 974e7accc4d808..6f5a654e70e81c 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_fuse.hpp +++ 
b/src/common/transformations/include/transformations/transpose_sinking/ts_fuse.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API TSFuse; */ class ov::pass::transpose_sinking::TSFuse : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TSFuse", "0"); + OPENVINO_MATCHER_PASS_RTTI("TSFuse"); TSFuse(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp index 5bc7cea340cb72..891b8bd85c2ed4 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSGatherForward : public ov::pass::transpose_ */ class ov::pass::transpose_sinking::TSGatherBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSGatherBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSGatherBackward"); TSGatherBackward(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp index dfdd062e1f7ce3..90ae417aca9fc6 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSInterpolateForward : public ov::pass::trans */ class ov::pass::transpose_sinking::TSInterpolateBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSInterpolateBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSInterpolateBackward"); TSInterpolateBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp 
b/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp index a983519a3012d6..d2992bc8a4abd8 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSReductionForward : public ov::pass::transpo */ class ov::pass::transpose_sinking::TSReductionBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSReductionBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSReductionBackward"); TSReductionBackward(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_reset_no_sinking_attribute.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_reset_no_sinking_attribute.hpp index 240c6e8342c069..3aa2a770390e35 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_reset_no_sinking_attribute.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_reset_no_sinking_attribute.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API TSResetNoSinkingAttribute; */ class ov::pass::transpose_sinking::TSResetNoSinkingAttribute : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSResetNoSinkingAttribute", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSResetNoSinkingAttribute"); TSResetNoSinkingAttribute(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp index 5e10a7f0e8a930..12ccc614861140 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp @@ -27,6 +27,6 @@ class ov::pass::transpose_sinking::TSSliceForward : public 
ov::pass::transpose_s class ov::pass::transpose_sinking::TSSliceBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSSliceBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSSliceBackward"); TSSliceBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp index cb24aa5273906f..b21bada67ad368 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp @@ -38,6 +38,6 @@ class ov::pass::transpose_sinking::TSSplitForward : public ov::pass::transpose_s */ class ov::pass::transpose_sinking::TSSplitBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSSplitBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSSplitBackward"); TSSplitBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp index 752e040ab52cab..0d86d0a4c29242 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSSqueezeForward : public ov::pass::transpose */ class ov::pass::transpose_sinking::TSSqueezeBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSSqueezeBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSSqueezeBackward"); TSSqueezeBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp index 9bb15894d70a81..ffd14ce9a38d84 100644 --- 
a/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp @@ -36,6 +36,6 @@ class ov::pass::transpose_sinking::TSTileForward : public ov::pass::transpose_si */ class ov::pass::transpose_sinking::TSTileBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSBinaryBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSTileBackward"); TSTileBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp index c8148e912b30c0..1d745ada561224 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSUnaryForward : public ov::pass::transpose_s */ class ov::pass::transpose_sinking::TSUnaryBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TSUnaryBackwardMultiConsumers", "0"); + OPENVINO_MATCHER_PASS_RTTI("TSUnaryBackward"); TSUnaryBackward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp index 1ee195624cb801..60e5f8f7893961 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp @@ -37,6 +37,6 @@ class ov::pass::transpose_sinking::TSUnsqueezeForward : public ov::pass::transpo */ class ov::pass::transpose_sinking::TSUnsqueezeBackward : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::pass::TSUnsqueezeBackward", "0"); + OPENVINO_MATCHER_PASS_RTTI("ov::pass::TSUnsqueezeBackward"); TSUnsqueezeBackward(); }; diff --git 
a/src/common/transformations/src/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.cpp b/src/common/transformations/src/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.cpp index 7c22dbdfeac53d..5abe0b5b8c87e3 100644 --- a/src/common/transformations/src/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.cpp @@ -31,7 +31,7 @@ namespace ov { namespace pass { class InitNMSPath : public pass::MatcherPass { public: - OPENVINO_RTTI("InitNMSPath", "0"); + OPENVINO_MATCHER_PASS_RTTI("InitNMSPath"); InitNMSPath() { MATCHER_SCOPE(InitNMSPath); auto nms_pattern = pattern::wrap_type(); diff --git a/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp b/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp index 4e9715883ec9f8..1e6d7caec39ac0 100644 --- a/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp @@ -224,6 +224,7 @@ void add_node_with_inputs_to_vector(const std::shared_ptr& node, NodeV } // namespace class SplitConcat : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::init::SplitConcat"); SplitConcat(NodeVector& nodes_to_fuse) { MATCHER_SCOPE(SplitConcat); auto split_p = pattern::wrap_type(); @@ -280,6 +281,7 @@ class SplitConcat : public ov::pass::MatcherPass { class Gather : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::init::Gather"); Gather(NodeVector& nodes_to_fuse) { MATCHER_SCOPE(Gather); auto input_p = pattern::any_input(pattern::has_static_rank()); @@ -341,6 +343,7 @@ namespace prop { class Binary : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::prop::Binary"); Binary() { MATCHER_SCOPE(Binary); auto pattern_root = 
pattern::wrap_type(); @@ -426,6 +429,7 @@ class Binary : public ov::pass::MatcherPass { class Convolution : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::prop::Convolution"); Convolution() { MATCHER_SCOPE(Convolution); auto input_p = pattern::any_input(ric_attr::has>); @@ -448,6 +452,7 @@ class Convolution : public ov::pass::MatcherPass { class GroupConvolution : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::prop::GroupConvolution"); GroupConvolution() { MATCHER_SCOPE(GroupConvolution); auto input_p = pattern::any_input(ric_attr::has>); @@ -504,6 +509,7 @@ class GroupConvolution : public ov::pass::MatcherPass { class ShapeOf : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::prop::ShapeOf"); ShapeOf() { MATCHER_SCOPE(ShapeOf); auto pattern_root = pattern::wrap_type(); @@ -520,6 +526,7 @@ class ShapeOf : public ov::pass::MatcherPass { class PassThrough : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::prop::PassThrough"); PassThrough() { MATCHER_SCOPE(PassThrough); auto pattern_root = pattern::wrap_type>); @@ -570,6 +578,7 @@ class Transpose : public ov::pass::MatcherPass { class Unsupported : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::prop::Unsupported"); Unsupported() { MATCHER_SCOPE(Unsupported); auto pattern_root = pattern::any_input(); @@ -605,6 +614,7 @@ bool need_to_erase_ric(const Output& output) { class InsertReverseInputChannel : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::fuse::InsertReverseInputChannel"); InsertReverseInputChannel(NodeVector& fused_nodes) { MATCHER_SCOPE(InsertReverseInputChannel); auto pattern_root = pattern::any_input(); @@ -628,6 +638,7 @@ class InsertReverseInputChannel : public ov::pass::MatcherPass { class EraseSplitConcat : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::fuse::EraseSplitConcat"); EraseSplitConcat() { 
MATCHER_SCOPE(EraseSplitConcat); auto input_p = pattern::any_input(); @@ -649,6 +660,7 @@ class EraseSplitConcat : public ov::pass::MatcherPass { class EraseGather : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::fuse::EraseGather"); EraseGather() { MATCHER_SCOPE(EraseGather); auto input_p = pattern::any_input(); @@ -672,6 +684,7 @@ class EraseGather : public ov::pass::MatcherPass { namespace back_prop { class Binary : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::back_prop::Binary"); Binary() { MATCHER_SCOPE(Binary); auto fake_quantize_pattern = @@ -755,6 +768,7 @@ class Binary : public ov::pass::MatcherPass { class ConvertPassThrough : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("pass::back_prop::ConvertPassThrough"); ConvertPassThrough() { MATCHER_SCOPE(ConvertPassThrough); auto pattern_root = pattern::wrap_type(pattern::has_static_rank()); diff --git a/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp b/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp index 2235e87c792b0d..fc667bd23a97b4 100644 --- a/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp +++ b/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp @@ -116,7 +116,7 @@ const std::shared_ptr propagate_through_ops = */ class PropagateUpMarkToKeepInMixedPrecision : public pass::MatcherPass { public: - OPENVINO_RTTI("PropagateUpMarkToKeepInMixedPrecision", "0"); + OPENVINO_MATCHER_PASS_RTTI("PropagateUpMarkToKeepInMixedPrecision"); PropagateUpMarkToKeepInMixedPrecision() { MATCHER_SCOPE(PropagateUpMarkToKeepInMixedPrecision); @@ -159,7 +159,7 @@ class PropagateUpMarkToKeepInMixedPrecision : public pass::MatcherPass { */ class PropagateDownMarkToKeepInMixedPrecision : public pass::MatcherPass { 
public: - OPENVINO_RTTI("PropagateDownMarkToKeepInMixedPrecision", "0"); + OPENVINO_MATCHER_PASS_RTTI("PropagateDownMarkToKeepInMixedPrecision"); PropagateDownMarkToKeepInMixedPrecision() { MATCHER_SCOPE(PropagateDownMarkToKeepInMixedPrecision); @@ -197,7 +197,7 @@ class PropagateDownMarkToKeepInMixedPrecision : public pass::MatcherPass { class InitMarkReduceOpPath : public pass::MatcherPass { public: - OPENVINO_RTTI("InitMarkReduceOpPath", "0"); + OPENVINO_MATCHER_PASS_RTTI("InitMarkReduceOpPath"); InitMarkReduceOpPath() { MATCHER_SCOPE(InitMarkReduceOpPath); @@ -217,7 +217,7 @@ class InitMarkReduceOpPath : public pass::MatcherPass { class PropagateMarkUpReduceOpPath : public pass::MatcherPass { public: - OPENVINO_RTTI("PropagateMarkUpReduceOpPath", "0"); + OPENVINO_MATCHER_PASS_RTTI("PropagateMarkUpReduceOpPath"); PropagateMarkUpReduceOpPath() { MATCHER_SCOPE(PropagateMarkUpReduceOpPath); @@ -244,8 +244,8 @@ class PropagateMarkUpReduceOpPath : public pass::MatcherPass { class MarkExp : public pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("MarkExp"); // only exponent that go into ReduceOp should be marked as precision sensitive and kept in f32 - OPENVINO_RTTI("MarkExp", "0"); MarkExp() { MATCHER_SCOPE(MarkExp); auto exp_pattern = pattern::wrap_type(); @@ -288,7 +288,7 @@ class MarkExpInReduceOpPath : public BackwardGraphRewrite { */ class MarkDivWithEps : public MatcherPass { public: - OPENVINO_RTTI("MarkDivWithEps", "0"); + OPENVINO_MATCHER_PASS_RTTI("MarkDivWithEps"); MarkDivWithEps() { MATCHER_SCOPE(MarkDivWithEps); @@ -367,7 +367,7 @@ class MarkDivWithEps : public MatcherPass { class PropagateDownDisableSensitivityForQuantized : public pass::MatcherPass { public: - OPENVINO_RTTI("DisableMarkingForQuantizedNodes", "0"); + OPENVINO_MATCHER_PASS_RTTI("PropagateDownDisableSensitivityForQuantized"); PropagateDownDisableSensitivityForQuantized() { MATCHER_SCOPE(PropagateDownDisableSensitivityForQuantized); diff --git 
a/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp b/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp index 9f3b6b976d14df..119816266ffdc4 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp @@ -1319,6 +1319,7 @@ ov::pass::ConvertLoopWithSlicedInputConcatOutputToLSTMSequence::ConvertLoopWithS class EliminateGatherWithRange : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("EliminateGatherWithRange"); EliminateGatherWithRange() { using namespace ov; using namespace ov::pass; From e32fc0cd5d219b8418ccb360d2389e8978f15efa Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Fri, 20 Dec 2024 18:37:51 +0100 Subject: [PATCH 44/60] [RTTI] Add OPENVINO_GRAPH_REWRITE_RTTI definition (#28165) ### Details: - Added RTTI definition for passes derived from `ov::pass::GraphRewrite`. - Applied where applicable. 
### Tickets: - CVS-159694 Signed-off-by: Tomasz Jankowski --- .../include/low_precision/low_precision.hpp | 2 +- .../include/compress_quantize_weights.hpp | 2 +- .../offline_transformations/include/pruning.hpp | 4 ++-- .../compress_float_constants.hpp | 2 +- .../common_optimizations/concat_reduce_fusion.hpp | 2 +- .../convert_nms_gather_path_to_unsigned.hpp | 2 +- .../fuse_rotary_positional_embeddings.hpp | 2 +- .../common_optimizations/gelu_fusion.hpp | 2 +- .../common_optimizations/hsigmoid_fusion.hpp | 2 +- .../common_optimizations/hswish_fusion.hpp | 2 +- .../common_optimizations/lin_op_sequence_fusion.hpp | 2 +- .../common_optimizations/lstm_cell_fusion.hpp | 2 +- .../move_eltwise_up_data_movement.hpp | 2 +- .../common_optimizations/mvn_fusion.hpp | 2 +- .../common_optimizations/nop_elimination.hpp | 2 +- .../common_optimizations/pad_fusion.hpp | 2 +- .../common_optimizations/prelu_fusion.hpp | 2 +- .../common_optimizations/pull_through_reduce.hpp | 2 +- .../remove_filtering_boxes_by_size.hpp | 2 +- .../common_optimizations/swish_fusion.hpp | 2 +- .../common_optimizations/transpose_sinking.hpp | 2 +- .../bidirectional_sequences_decomposition.hpp | 2 +- .../convert_bitwise_to_logical_bool.hpp | 2 +- .../op_conversions/convert_reduce_to_pooling.hpp | 2 +- .../op_conversions/convert_reduce_to_reshape.hpp | 2 +- .../convert_sequences_to_tensor_iterator.hpp | 2 +- .../op_conversions/convert_ti_to_sequences.hpp | 4 ++-- .../transpose_sinking/ts_general.hpp | 4 ++-- .../openvino/pass/backward_graph_rewrite.hpp | 2 +- src/core/include/openvino/pass/graph_rewrite.hpp | 13 ++++++++++++- src/core/src/pass/graph_rewrite.cpp | 2 ++ src/core/tests/graph_rewrite.cpp | 2 +- src/core/tests/pass_config.cpp | 4 ++-- src/core/tests/pattern.cpp | 2 ++ .../arm/pass/convert_reduce_multi_axis.hpp | 2 +- .../arm/pass/convert_reduce_no_keep_dims.hpp | 2 +- .../common/pass/causal_mask_preprocess_fusion.hpp | 4 ++-- .../common/pass/rnn_sequences_optimization.hpp | 2 +- 
.../cpu_opset/x64/pass/mha_fusion.hpp | 2 +- .../plugin/transformations/indirect_kv_cache.hpp | 2 +- .../plugin/transformations/kv_cache_compression.hpp | 2 +- .../src/plugin/transformations/kv_cache_fusion.hpp | 2 +- .../src/plugin/transformations/transpose_fusion.hpp | 2 +- 43 files changed, 61 insertions(+), 46 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp index b3b92340303ced..483ec19f10a224 100644 --- a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp +++ b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp @@ -56,7 +56,7 @@ class ov::pass::low_precision::MarkupOptimizations : public ov::pass::ModelPass class ov::pass::low_precision::TypeRelaxedReplacer : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("TypeRelaxedReplacer", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("low_precision::TypeRelaxedReplacer"); TypeRelaxedReplacer(); }; diff --git a/src/common/offline_transformations/include/compress_quantize_weights.hpp b/src/common/offline_transformations/include/compress_quantize_weights.hpp index 9b2792caf93d47..90a2a434ae346a 100644 --- a/src/common/offline_transformations/include/compress_quantize_weights.hpp +++ b/src/common/offline_transformations/include/compress_quantize_weights.hpp @@ -102,6 +102,6 @@ class ov::pass::CompressWeightsWithFakeConvert : public ov::pass::MatcherPass { class ov::pass::CompressQuantizeWeights : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("CompressQuantizeWeights", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("CompressQuantizeWeights"); CompressQuantizeWeights(); }; diff --git a/src/common/offline_transformations/include/pruning.hpp b/src/common/offline_transformations/include/pruning.hpp index e573108a89eb86..13e46777fde205 100644 --- a/src/common/offline_transformations/include/pruning.hpp +++ 
b/src/common/offline_transformations/include/pruning.hpp @@ -29,7 +29,7 @@ class Pruning; */ class ov::pass::InitMasks : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("InitMasks", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("InitMasks"); InitMasks(); }; @@ -56,7 +56,7 @@ class ov::pass::InitConstMask : public ov::pass::MatcherPass { */ class ov::pass::PropagateMasks : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("PropagateMasks", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("PropagateMasks"); PropagateMasks(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp b/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp index d8e0eb8c154766..2f87e5caa8c483 100644 --- a/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/compress_float_constants.hpp @@ -41,7 +41,7 @@ class ov::pass::CompressFloatConstantsImpl : public ov::pass::MatcherPass { */ class ov::pass::CompressFloatConstants : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("CompressFloatConstants", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("CompressFloatConstants"); /// @brief Transformation constructor /// @param postponed Postponed compression, see ov::pass::CompressFloatConstantsImpl for details. 
CompressFloatConstants(bool postponed = false) { diff --git a/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp index ae02a2a50b4a2b..a1edbb5dafd32e 100644 --- a/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/concat_reduce_fusion.hpp @@ -76,6 +76,6 @@ class ov::pass::PullSqueezeThroughEltwise : public ov::pass::MatcherPass { class ov::pass::ConcatReduceFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConcatReduceFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConcatReduceFusion"); ConcatReduceFusion(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.hpp b/src/common/transformations/include/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.hpp index d778c255160281..6982e985016402 100644 --- a/src/common/transformations/include/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/convert_nms_gather_path_to_unsigned.hpp @@ -27,6 +27,6 @@ class TRANSFORMATIONS_API ConvertNmsGatherPathToUnsigned; */ class ov::pass::ConvertNmsGatherPathToUnsigned : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertNmsGatherPathToUnsigned", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertNmsGatherPathToUnsigned"); ConvertNmsGatherPathToUnsigned(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp b/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp index 8c45842b274dd5..51177738c1e2d5 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp @@ -90,7 +90,7 @@ class ov::pass::RoPEShareCosSin : public ov::pass::MatcherPass { */ class ov::pass::RoPEFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("RoPEFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("RoPEFusion"); RoPEFusion(bool support_2d_rope = false) { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp index 1388cda8268e17..7fd9826b0374be 100644 --- a/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/gelu_fusion.hpp @@ -96,7 +96,7 @@ class ov::pass::GeluFusionWithTanhNoPower : public ov::pass::MatcherPass { */ class ov::pass::GeluFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("GeluFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("GeluFusion"); GeluFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp index 72da0b538b9336..5b301246c7a541 100644 --- a/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/hsigmoid_fusion.hpp @@ -79,7 +79,7 @@ class ov::pass::HSigmoidFusionWithClampDiv : public ov::pass::MatcherPass { */ class ov::pass::HSigmoidFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("HSigmoidFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("HSigmoidFusion"); HSigmoidFusion() { add_matcher(); add_matcher(); 
diff --git a/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp index 40a91cc4f08a9d..7b1faa990dd360 100644 --- a/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/hswish_fusion.hpp @@ -68,7 +68,7 @@ class ov::pass::HSwishFusionWithClamp : public ov::pass::MatcherPass { */ class ov::pass::HSwishFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("HSwishFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("HSwishFusion"); HSwishFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp index 091deb3e32e58f..cc71a676eb3b60 100644 --- a/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/lin_op_sequence_fusion.hpp @@ -45,7 +45,7 @@ class ov::pass::MultiplyMultiplyFusion : public ov::pass::MatcherPass { */ class ov::pass::LinOpSequenceFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("LinOpSequenceFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("LinOpSequenceFusion"); LinOpSequenceFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp index 1bc5344b31e09b..2acbbf626cd6f3 100644 --- a/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/lstm_cell_fusion.hpp @@ -45,7 +45,7 @@ class 
ov::pass::LSTMCellFusionWithSplitWeights : public ov::pass::MatcherPass { */ class ov::pass::LSTMCellFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("LSTMCellFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("LSTMCellFusion"); LSTMCellFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp b/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp index d691eee7c29795..dd303ed1bfec45 100644 --- a/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/move_eltwise_up_data_movement.hpp @@ -56,7 +56,7 @@ class TRANSFORMATIONS_API MoveEltwiseUpThroughDataMovPerChannel : public ov::pas class TRANSFORMATIONS_API MoveEltwiseUpThroughDataMov : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("MoveEltwiseUpThroughDataMov", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("MoveEltwiseUpThroughDataMov"); MoveEltwiseUpThroughDataMov(std::vector allowed_data_movement_ops = get_default_allowed_ops()) { this->add_matcher(allowed_data_movement_ops); this->add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp index 4a625816261c9b..5433a081768090 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mvn_fusion.hpp @@ -50,7 +50,7 @@ class ov::pass::MVNFusionWithConstantsInside : public ov::pass::MatcherPass { */ class ov::pass::MVNFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("MVNFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("MVNFusion"); MVNFusion() { add_matcher(); add_matcher(); diff --git 
a/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp b/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp index ea7d428bc1eea7..55cb8eeb2cf0c1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/nop_elimination.hpp @@ -145,7 +145,7 @@ class ov::pass::EliminateScatterUpdate : public ov::pass::MatcherPass { class ov::pass::NopElimination : public GraphRewrite { public: - OPENVINO_RTTI("NopElimination", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("NopElimination"); NopElimination(bool use_shape_for_elimination = true); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp index 018c098d221d3e..628391547da82c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/pad_fusion.hpp @@ -90,7 +90,7 @@ class ov::pass::PadFusionGroupConvolutionBackpropData : public ov::pass::Matcher class ov::pass::PadFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("PadFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("PadFusion"); PadFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp index bb1c3206f1ea3d..729d32375c9eb1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/prelu_fusion.hpp @@ -148,7 +148,7 @@ class ov::pass::PReluFusionNegReluMulAdd : public ov::pass::MatcherPass { */ class ov::pass::PReluFusion : public 
ov::pass::GraphRewrite { public: - OPENVINO_RTTI("PReluFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("PReluFusion"); PReluFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp b/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp index 2824ba2a8b374d..86f54e9dc03ac5 100644 --- a/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/pull_through_reduce.hpp @@ -49,7 +49,7 @@ class ov::pass::PullReshapeThroughReduce : public ov::pass::MatcherPass { */ class ov::pass::PullThroughReduce : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("PullThroughReduce", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("PullThroughReduce"); PullThroughReduce() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp b/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp index 02ebb6eec3a48d..0965f06a465770 100644 --- a/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/remove_filtering_boxes_by_size.hpp @@ -21,7 +21,7 @@ class TRANSFORMATIONS_API RemoveFilteringBoxesBySize; class ov::pass::FuseFilteringBoxesBySize : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("FuseFilteringBoxesBySize", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("FuseFilteringBoxesBySize"); FuseFilteringBoxesBySize(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp index 2bee12da7d9c41..d9a1ae6e321f35 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/swish_fusion.hpp @@ -68,7 +68,7 @@ class ov::pass::SwishFusionWithoutBeta : public ov::pass::MatcherPass { */ class ov::pass::SwishFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("SwishFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("SwishFusion"); SwishFusion() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp b/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp index 31de821878e971..d9f5c257f6dda1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/transpose_sinking.hpp @@ -81,7 +81,7 @@ class ov::pass::TransposeFuse : public ov::pass::MatcherPass { */ class ov::pass::TransposeSinking : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("TransposeSinking", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("TransposeSinking"); TransposeSinking() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp index f74f08a9b8c061..a49c1655537844 100644 --- a/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp @@ -67,7 +67,7 @@ class ov::pass::BidirectionalRNNSequenceDecomposition : public ov::pass::Matcher class ov::pass::BidirectionalSequenceDecomposition : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("BidirectionalSequenceDecomposition", "0"); + 
OPENVINO_GRAPH_REWRITE_RTTI("BidirectionalSequenceDecomposition"); BidirectionalSequenceDecomposition() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp index a5e130e2389af2..64821ce658eb66 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp @@ -43,7 +43,7 @@ class ov::pass::ConvertBitwiseXorToLogicalXor : public ov::pass::MatcherPass { */ class ConvertBitwiseToLogical : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertBitwiseToLogical", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertBitwiseToLogical"); ConvertBitwiseToLogical() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp index 36d2b052243382..32a2f7a3ace512 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp @@ -61,7 +61,7 @@ class ov::pass::ConvertReduceSumToPooling : public ConvertReduceBase { class ov::pass::ConvertReduceToPooling : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertReduceToPooling", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertReduceToPooling"); ConvertReduceToPooling() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp index f020e768be2feb..9eeb3e5c0f8da6 100644 --- 
a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp @@ -84,7 +84,7 @@ class ov::pass::ConvertReduceLogicalOrToReshape : public CvtReduceBase { class ov::pass::ConvertReduceToReshape : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertReduceToReshape", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertReduceToReshape"); // Handling reduce if it can be converted to reshape (check input/output tensor) ConvertReduceToReshape() { // Redundant reduce based on its mode diff --git a/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp b/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp index 46a7e8ff0317e9..e108f4a50ce1f2 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp @@ -59,6 +59,6 @@ class ov::pass::ConvertLSTMSequenceToTensorIterator : public ov::pass::MatcherPa class ov::pass::ConvertSequenceToTensorIterator : public GraphRewrite { public: - OPENVINO_RTTI("ConvertSequenceToTensorIterator", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertSequenceToTensorIterator"); ConvertSequenceToTensorIterator(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp index fb53cc81743ec4..f4bd61573e3ac2 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp @@ -66,7 +66,7 @@ class ov::pass::ConvertTensorIteratorToGRUSequence : public 
ov::pass::MatcherPas class ov::pass::ConvertTensorIteratorToSequence : public GraphRewrite { public: - OPENVINO_RTTI("ConvertTensorIteratorToSequence", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertTensorIteratorToSequence"); ConvertTensorIteratorToSequence(); }; @@ -88,7 +88,7 @@ class ov::pass::ConvertLoopWithScatterUpdateToLSTMSequence : public ov::pass::Ma */ class ov::pass::ConvertLoopToLSTMSequence : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertLoopToLSTMSequence", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertLoopToLSTMSequence"); ConvertLoopToLSTMSequence() { add_matcher(); add_matcher(); diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp index 09d4d5819322a9..b39a25b9db0872 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp @@ -29,7 +29,7 @@ using TransposeSinkingGeneral = ov::pass::transpose_sinking::TSGeneral; */ class ov::pass::transpose_sinking::TSGeneralForward : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("TSGeneralForward", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("TSGeneralForward"); TSGeneralForward(); }; @@ -40,7 +40,7 @@ class ov::pass::transpose_sinking::TSGeneralForward : public ov::pass::GraphRewr */ class ov::pass::transpose_sinking::TSGeneralBackward : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("TSGeneralBackward", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("TSGeneralBackward"); TSGeneralBackward(); }; diff --git a/src/core/include/openvino/pass/backward_graph_rewrite.hpp b/src/core/include/openvino/pass/backward_graph_rewrite.hpp index 7e7f6f118efe8d..cb7f24a76272bb 100644 --- a/src/core/include/openvino/pass/backward_graph_rewrite.hpp +++ b/src/core/include/openvino/pass/backward_graph_rewrite.hpp @@ -11,7 +11,7 @@ namespace ov { namespace pass { 
class OPENVINO_API BackwardGraphRewrite : public GraphRewrite { public: - OPENVINO_RTTI("ov::pass::BackwardGraphRewrite"); + OPENVINO_GRAPH_REWRITE_RTTI("ov::pass::BackwardGraphRewrite"); BackwardGraphRewrite() = default; diff --git a/src/core/include/openvino/pass/graph_rewrite.hpp b/src/core/include/openvino/pass/graph_rewrite.hpp index ec8e1339912513..4628875fef8fd9 100644 --- a/src/core/include/openvino/pass/graph_rewrite.hpp +++ b/src/core/include/openvino/pass/graph_rewrite.hpp @@ -8,8 +8,19 @@ #include #include +#include "openvino/core/rtti.hpp" #include "openvino/pass/matcher_pass.hpp" +#define _OPENVINO_GRAPH_REWRITE_RTTI_WITH_TYPE(TYPE_NAME) _OPENVINO_GRAPH_REWRITE_RTTI_WITH_TYPE_VERSION(TYPE_NAME, "0") + +#define _OPENVINO_GRAPH_REWRITE_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \ + _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT(TYPE_NAME, VERSION_NAME, ::ov::pass::GraphRewrite) + +#define OPENVINO_GRAPH_REWRITE_RTTI(...) \ + _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR_2(__VA_ARGS__, \ + _OPENVINO_GRAPH_REWRITE_RTTI_WITH_TYPE_VERSION, \ + _OPENVINO_GRAPH_REWRITE_RTTI_WITH_TYPE)(__VA_ARGS__)) + namespace ov { namespace pass { /// \brief GraphRewrite is a container for MatcherPasses that allows to run them on Function @@ -80,7 +91,7 @@ class OPENVINO_API GraphRewrite : public ModelPass { /// /// class ov::pass::LinFusions: public ov::pass::GraphRewrite { /// public: - /// OPENVINO_RTTI("LinFusion"); + /// OPENVINO_GRAPH_REWRITE_RTTI("LinFusion"); /// Fusions() { /// add_matcher(); /// add_matcher(); diff --git a/src/core/src/pass/graph_rewrite.cpp b/src/core/src/pass/graph_rewrite.cpp index 029f572189f829..f8a1f1e723d7a7 100644 --- a/src/core/src/pass/graph_rewrite.cpp +++ b/src/core/src/pass/graph_rewrite.cpp @@ -253,6 +253,8 @@ void ov::pass::GraphRewrite::set_pass_config(const std::shared_ptr& // For example: // // class ExampleGraphRewrite: public pass::GraphRewrite { + // public: + // OPENVINO_GRAPH_REWRITE_RTTI("ExampleGraphRewrite"); // 
ExampleGraphRewrite() { // add_mather(); // add_mather(); diff --git a/src/core/tests/graph_rewrite.cpp b/src/core/tests/graph_rewrite.cpp index 20955f5a5d6b1f..c47b6d5a473666 100644 --- a/src/core/tests/graph_rewrite.cpp +++ b/src/core/tests/graph_rewrite.cpp @@ -58,7 +58,7 @@ class GatherNodesPass : public ov::pass::MatcherPass { class Anchor : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("Anchor"); + OPENVINO_GRAPH_REWRITE_RTTI("Anchor"); Anchor() : GraphRewrite() {} }; diff --git a/src/core/tests/pass_config.cpp b/src/core/tests/pass_config.cpp index 053cb2b62aff32..56d9b2fedf8197 100644 --- a/src/core/tests/pass_config.cpp +++ b/src/core/tests/pass_config.cpp @@ -66,7 +66,7 @@ class TestModelPass : public pass::ModelPass { class TestGraphRewritePass : public pass::GraphRewrite { public: - OPENVINO_RTTI("TestGraphRewritePass"); + OPENVINO_GRAPH_REWRITE_RTTI("TestGraphRewritePass"); TestGraphRewritePass() { add_matcher(); add_matcher(); @@ -284,7 +284,7 @@ class TestNestedMatcher : public ov::pass::MatcherPass { class TestNestedGraphRewrite : public pass::GraphRewrite { public: - OPENVINO_RTTI("TestNestedGraphRewrite"); + OPENVINO_GRAPH_REWRITE_RTTI("TestNestedGraphRewrite"); TestNestedGraphRewrite() { add_matcher(); } diff --git a/src/core/tests/pattern.cpp b/src/core/tests/pattern.cpp index 982e59b55f0f97..5bb961e57db1c2 100644 --- a/src/core/tests/pattern.cpp +++ b/src/core/tests/pattern.cpp @@ -82,6 +82,8 @@ static std::shared_ptr construct_mean_graph() { class TestGraphRewrite : public ov::pass::GraphRewrite { public: + OPENVINO_GRAPH_REWRITE_RTTI("TestGraphRewrite"); + void construct_multiply_by_one() { // pattern #1 : a * 1 = a auto iconst1 = construct_constant_node(1); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp index 947d7ee476bc81..4b0bb0e8c81f8a 100644 --- 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp @@ -82,7 +82,7 @@ class ConvertReduceSum : public ConvertReduceMultiAxisBase { class ConvertReduceMultiAxis : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertReduceMultiAxis", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertReduceMultiAxis"); ConvertReduceMultiAxis() { add_matcher(); add_matcher(); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp index 6cc683154cc175..9684a047afa08e 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp @@ -59,7 +59,7 @@ class ConvertReduction : public ConvertReduceNoKeepDimsBase { class ConvertReduceNoKeepDims : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("ConvertReduceNoKeepDims", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("ConvertReduceNoKeepDims"); ConvertReduceNoKeepDims() { add_matcher>(); add_matcher>(); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp index f9db370aea49bb..4a46a042722a12 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp @@ -11,9 +11,9 @@ namespace intel_cpu { class CausalMaskPreprocessFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("CausalMaskPreprocessFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("CausalMaskPreprocessFusion"); CausalMaskPreprocessFusion(); }; } // 
namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp index a63a3dce8219c2..4cfc27d7836180 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp @@ -29,7 +29,7 @@ class OptimizeRNNSequenceTransposes : public ov::pass::MatcherPass { class OptimizeSequenceTransposes : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("OptimizeSequenceTransposes", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("OptimizeSequenceTransposes"); OptimizeSequenceTransposes(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp index fe4f4ccae04f1c..d84c11af9801e5 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp @@ -54,7 +54,7 @@ class MHAQuantFusion2 : public MHAFusionBase { class MHAFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("MHAFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("MHAFusion"); MHAFusion() { add_matcher(); add_matcher(); diff --git a/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp b/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp index 0fc96b6215ba95..f76edeeb4f20da 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp @@ -38,7 +38,7 @@ namespace intel_gpu { /// └───────────┘ └───────────────┘ class IndirectKVCache : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("IndirectKVCache", 
"0"); + OPENVINO_GRAPH_REWRITE_RTTI("IndirectKVCache"); IndirectKVCache(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp index 1587021a03ed36..036fdb78914891 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp @@ -32,7 +32,7 @@ namespace intel_gpu { class KVCacheCompression : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("KVCacheCompression", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("KVCacheCompression"); KVCacheCompression(ov::element::Type compression_dt); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.hpp index dbe147da8d46b7..614d3ba5020363 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.hpp @@ -76,7 +76,7 @@ namespace intel_gpu { /// └─────────────┘ └───────────┘ └─────────┘ class KVCacheFusion : public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("KVCacheFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("KVCacheFusion"); KVCacheFusion(); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp index a845c7a7aa86b0..3a985a33c722df 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class TransposeFusion: public ov::pass::GraphRewrite { public: - OPENVINO_RTTI("TransposeFusion", "0"); + OPENVINO_GRAPH_REWRITE_RTTI("TransposeFusion"); TransposeFusion(bool supports_immad = 
false); }; From 8d74cbb8e1af7c66ccee202fec5a18565e5b37b0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sat, 21 Dec 2024 00:21:36 +0400 Subject: [PATCH 45/60] Find python in extensions safely (#28172) ### Details: - See https://github.com/microsoft/vcpkg/pull/42259#issuecomment-2528701658 --- docs/snippets/CMakeLists.txt | 15 +++++++++------ src/core/template_extension/CMakeLists.txt | 15 +++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index ec1cf14bd0e60b..18d176212a0212 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -122,12 +122,15 @@ set(TARGET_NAME_PY "ov_integration_snippet_py") cmake_minimum_required(VERSION 3.10) set(CMAKE_CXX_STANDARD 11) -find_package(Python3 REQUIRED) - execute_process( - COMMAND ${Python3_EXECUTABLE} -c "from openvino.utils import get_cmake_path; print(get_cmake_path(), end='')" - OUTPUT_VARIABLE OpenVINO_DIR_PY - ERROR_QUIET - ) +if(NOT CMAKE_CROSSCOMPILING) + find_package(Python3 QUIET COMPONENTS Interpreter) + if(Python3_Interpreter_FOUND) + execute_process( + COMMAND ${Python3_EXECUTABLE} -c "from openvino.utils import get_cmake_path; print(get_cmake_path(), end='')" + OUTPUT_VARIABLE OpenVINO_DIR_PY + ERROR_QUIET) + endif() +endif() find_package(OpenVINO REQUIRED PATHS "${OpenVINO_DIR_PY}") diff --git a/src/core/template_extension/CMakeLists.txt b/src/core/template_extension/CMakeLists.txt index aa8030e78d7171..3cfcfcd058ff94 100644 --- a/src/core/template_extension/CMakeLists.txt +++ b/src/core/template_extension/CMakeLists.txt @@ -8,12 +8,15 @@ set(CMAKE_CXX_STANDARD 11) set(TARGET_NAME "openvino_template_extension") # The OpenVINO installed from PyPI can be used to find OpenVINO_DIR -find_package(Python3 REQUIRED) -execute_process( - COMMAND ${Python3_EXECUTABLE} -c "from openvino.utils import get_cmake_path; print(get_cmake_path(), end='')" - OUTPUT_VARIABLE OpenVINO_DIR_PY - ERROR_QUIET -) +if(NOT 
CMAKE_CROSSCOMPILING) + find_package(Python3 QUIET COMPONENTS Interpreter) + if(Python3_Interpreter_FOUND) + execute_process( + COMMAND ${Python3_EXECUTABLE} -c "from openvino.utils import get_cmake_path; print(get_cmake_path(), end='')" + OUTPUT_VARIABLE OpenVINO_DIR_PY + ERROR_QUIET) + endif() +endif() find_package(OpenVINO REQUIRED PATHS "${OpenVINO_DIR_PY}") From 574c1a23085f44d84e11988f4679c7a958a9a3ec Mon Sep 17 00:00:00 2001 From: Xiake Sun Date: Sat, 21 Dec 2024 05:04:58 +0800 Subject: [PATCH 46/60] Add WA for MSVC compiler mutex constructor issue with VS2022 (#28169) ### Details: - This issue is a MSVC compiler bug affecting certain versions of Visual Studio 2022. When using std::mutex a null dereference may occur, leading to a silent crash in Release mode. - Adding the compiler option "/D_DISABLE_CONSTEXPR_MUTEX_CONSTRUCTOR" serves as a workaround for this problem. Reference: https://hydrogenaud.io/index.php/topic,126070.0.html https://github.com/microsoft/STL/wiki/Changelog#vs-2022-1710 ### Tickets: - CVS-159684 --- cmake/developer_package/compile_flags/os_flags.cmake | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index 660fd6160893ae..e75c6851ad0f7b 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -455,6 +455,12 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # Build with multiple processes ov_add_compiler_flags(/MP) + # Workaround for an MSVC compiler issue in some versions of Visual Studio 2022. + # The issue involves a null dereference to a mutex. 
For details, refer to link https://github.com/microsoft/STL/wiki/Changelog#vs-2022-1710 + if(MSVC AND MSVC_VERSION GREATER_EQUAL 1930 AND MSVC_VERSION LESS 1941) + ov_add_compiler_flags(/D_DISABLE_CONSTEXPR_MUTEX_CONSTRUCTOR) + endif() + if(AARCH64 AND NOT MSVC_VERSION LESS 1930) # otherwise, _ARM64_EXTENDED_INTRINSICS is defined, which defines 'mvn' macro ov_add_compiler_flags(/D_ARM64_DISTINCT_NEON_TYPES) From 6e02445c5c8e18d5cd79613986faf3dfd2ca7ec8 Mon Sep 17 00:00:00 2001 From: Steve Yoo Date: Sat, 21 Dec 2024 19:47:20 +0900 Subject: [PATCH 47/60] [GPU] Skip reorder opt when its dependency is crop (#27547) ### Details: - *Skip reorder opt when its dependency is crop* ### Tickets: - *155068* --- .../remove_redundant_reorders.cpp | 6 ++ .../passes/add_required_reorders_test.cpp | 4 +- .../passes/prepare_buffer_fusing_test.cpp | 2 +- .../unit/test_cases/reorder_gpu_test.cpp | 93 +++++++++++++++++++ 4 files changed, 102 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/remove_redundant_reorders.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/remove_redundant_reorders.cpp index 1e5f943600fc05..ac7810c6e9154c 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/remove_redundant_reorders.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/remove_redundant_reorders.cpp @@ -295,6 +295,12 @@ void remove_redundant_reorders::run(program& p) { auto o_layout = r_node.get_output_layout(); const auto& i_layout = r_node.get_input_layout(0); + auto is_r_node_rank_changed = r_node.get_output_layout().get_rank() != r_node.get_dependency(0).get_output_layout().get_rank(); + if (is_r_node_rank_changed && + ((!update_implementations && r_node.get_dependency(0).is_type()) || + (r_node.get_dependency(0).is_type() && r_node.get_dependency(0).can_be_optimized()))) + continue; + // Optimize reorder b_fs_yx_fsv16 -> bfyx when spatials are equal to 1. 
In this case we can reinterpret buffer, // but pads need to be handled correctly. if (i_layout.format == format::b_fs_yx_fsv16 && o_layout.format == format::bfyx && !r_node.is_output() && diff --git a/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp index 9a4cb71450a53c..0eb425b4dc1119 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp @@ -192,9 +192,9 @@ TEST(add_required_reorders, skip_adding_reorder_batch_axis_padding) { crop_prim = network.get_primitive("crop2"); ASSERT_EQ(crop_prim->can_be_optimized(), true); auto reorder_prim = network.get_primitive("crop1_reorder"); - ASSERT_EQ(reorder_prim->can_be_optimized(), true); + ASSERT_EQ(reorder_prim->can_be_optimized(), false); reorder_prim = network.get_primitive("crop2_reorder"); - ASSERT_EQ(reorder_prim->can_be_optimized(), true); + ASSERT_EQ(reorder_prim->can_be_optimized(), false); auto concate = network.get_primitive("concat"); ASSERT_EQ(concate->can_be_optimized(), false); } diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 456fab4ae0286a..1eb11c662608e0 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -1224,7 +1224,7 @@ TEST(prepare_buffer_fusing, test_implicit_crop_and_outerpadding) { auto reorder_prim = network.get_primitive("gather1_reorder"); ASSERT_EQ(reorder_prim->can_be_optimized(), true); reorder_prim = network.get_primitive("gather2_reorder"); - ASSERT_EQ(reorder_prim->can_be_optimized(), true); + ASSERT_EQ(reorder_prim->can_be_optimized(), false); auto reshape_prim = network.get_primitive("reshape1"); ASSERT_EQ(reshape_prim->can_be_optimized(), true); } diff --git 
a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp index 8ade3b6c8e0f31..0f9f119f275a78 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp @@ -2467,6 +2467,99 @@ TEST(reorder_gpu_f32, bfzyx_to_bsv16_fsv16_padded) } } +TEST(reorder_gpu_f32, bfzyx_to_bfyx_padded) { + tests::random_generator rg(GET_SUITE_NAME); + auto& engine = get_test_engine(); + + const int32_t b_in = 1024; + const int32_t f_in = 64; + const int32_t x_in = 72; + const int32_t y_in = 2; + const int32_t z_in = 3; + + const int32_t b_crop = 1024; + const int32_t f_crop = 64; + const int32_t x_crop = 72; + const int32_t y_crop = 2; + const int32_t z_crop = 1; + + const int32_t z0_off = 0; + const int32_t z1_off = 1; + const int32_t z2_off = 2; + + auto input = engine.allocate_memory({ data_types::f32,format::bfzyx,{ b_in, f_in, x_in, y_in, z_in } }); + + topology topology; + topology.add(input_layout("input", input->get_layout())); + topology.add(crop("crop0", input_info("input"), { b_crop, f_crop, x_crop, y_crop, z_crop }, { 0, 0, 0, 0, z0_off })); + topology.add(crop("crop1", input_info("input"), { b_crop, f_crop, x_crop, y_crop, z_crop }, { 0, 0, 0, 0, z1_off })); + topology.add(crop("crop2", input_info("input"), { b_crop, f_crop, x_crop, y_crop, z_crop }, { 0, 0, 0, 0, z2_off })); + topology.add(reorder("reorder0", input_info("crop0"), format::bfyx, data_types::f32)); + topology.add(reorder("reorder1", input_info("crop1"), format::bfyx, data_types::f32)); + topology.add(reorder("reorder2", input_info("crop2"), format::bfyx, data_types::f32)); + topology.add(reshape("reshape0", input_info("reorder0"), tensor(batch(b_in), feature(y_in), spatial(x_in, f_in)))); + topology.add(reshape("reshape1", input_info("reorder1"), tensor(batch(b_in), feature(y_in), spatial(x_in, f_in)))); + topology.add(reshape("reshape2", 
input_info("reorder2"), tensor(batch(b_in), feature(y_in), spatial(x_in, f_in)))); + + std::vector input_vec = rg.generate_random_1d(input->count(), -10, 10); + set_values(input, input_vec); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + network.set_input_data("input", input); + auto outputs = network.execute(); + auto output0 = outputs.at("reshape0").get_memory(); + auto output1 = outputs.at("reshape1").get_memory(); + auto output2 = outputs.at("reshape2").get_memory(); + + cldnn::mem_lock output_ptr0(output0, get_test_stream()); + for (int b = 0; b < b_crop; ++b) { + for (int f = 0; f < f_crop; ++f) { + for (int z = 0; z < z_crop; ++z) { + for (int y = 0; y < y_crop; ++y) { + for (int x = 0; x < x_crop; ++x) { + int linear_id = x + x_in * (y + y_in * (z + z0_off + z_in * (f + f_in * b))); + int output_linear_id = x + x_crop * (y + y_crop * (z + z_crop * (f + f_crop * b))); + ASSERT_EQ(output_ptr0[output_linear_id], input_vec[linear_id]); + } + } + } + } + } + + cldnn::mem_lock output_ptr1(output1, get_test_stream()); + for (int b = 0; b < b_crop; ++b) { + for (int f = 0; f < f_crop; ++f) { + for (int z = 0; z < z_crop; ++z) { + for (int y = 0; y < y_crop; ++y) { + for (int x = 0; x < x_crop; ++x) { + int linear_id = x + x_in * (y + y_in * (z + z1_off + z_in * (f + f_in * b))); + int output_linear_id = x + x_crop * (y + y_crop * (z + z_crop * (f + f_crop * b))); + ASSERT_EQ(output_ptr1[output_linear_id], input_vec[linear_id]); + } + } + } + } + } + + cldnn::mem_lock output_ptr2(output2, get_test_stream()); + for (int b = 0; b < b_crop; ++b) { + for (int f = 0; f < f_crop; ++f) { + for (int z = 0; z < z_crop; ++z) { + for (int y = 0; y < y_crop; ++y) { + for (int x = 0; x < x_crop; ++x) { + int linear_id = x + x_in * (y + y_in * (z + z2_off + z_in * (f + f_in * b))); + int output_linear_id = x + x_crop * (y + y_crop * (z + z_crop * (f + f_crop 
* b))); + ASSERT_EQ(output_ptr2[output_linear_id], input_vec[linear_id]); + } + } + } + } + } +} + TEST(reorder_gpu_f32, b_fs_yx_fsv16_to_bfyx_opt_allowed) { auto& engine = get_test_engine(); From d52f1dae3cb18faee9df75fe8e5b6fd3e55c94e9 Mon Sep 17 00:00:00 2001 From: Xiong Yuan Date: Mon, 23 Dec 2024 10:26:24 +0800 Subject: [PATCH 48/60] [GPU] Fix static_impl != nullptr assertion for reorder node (#27786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Details: - Fix ”Assertion `static_impl != nullptr' failed“ issue for reorder (Convolution_0_reorder_1) node by skipping mark node in shape flow. ### Tickets: - [CVS-154336](https://jira.devtools.intel.com/browse/CVS-154336) --------- Signed-off-by: yuan.xiong --- .../mark_shape_of_subgraphs.cpp | 8 ++ .../passes/mark_shape_of_subgraphs_test.cpp | 105 ++++++++++++++++++ 2 files changed, 113 insertions(+) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp index 9539117bcf4b18..a40c7dfebb9de6 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "broadcast_inst.h" #include "shape_of_inst.h" #include "read_value_inst.h" #include "reshape_inst.h" @@ -86,6 +87,13 @@ bool mark_shape_of_subgraphs::can_mark_node(const program_node& node) { return false; } + // skip mark_node for broadcast node if dependency nodes are data and shape_of + auto& dependencies = node.get_dependencies(); + if (node.is_type() && dependencies.size() == 2) { + if (dependencies[0].first->is_type() && dependencies[1].first->is_type()) + return false; + } + return true; } diff --git a/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp 
b/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp index 493ab79bf8e2cb..ee4382e51645cd 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp @@ -318,3 +318,108 @@ TEST(mark_shape_of_subgraphs, gather_compressed_no_mark) { ASSERT_FALSE(check_subgraph(prog->get_node("shape_of"), prog->get_node("gather_compressed"))); ASSERT_FALSE(check_subgraph(prog->get_node("shape_of"), prog->get_node("concat"))); } + +TEST(mark_shape_of_subgraphs, broadcast_not_existed_after_shapeof) { + auto& engine = get_test_engine(); + auto input_layout_dynamic = layout{ov::PartialShape{ov::Dimension::dynamic(), 4, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f32, format::bfyx}; + auto data_0 = engine.allocate_memory({ ov::PartialShape{4}, data_types::i32, format::bfyx }); + set_values(data_0, {1, 4, 1, 1}); + auto weights = engine.allocate_memory({ data_types::f16, format::bfyx, {1152, 4, 1, 1} }); + + topology topology; + topology.add(input_layout("input", input_layout_dynamic)); + topology.add(data("data_0", data_0)); + topology.add(data("weights", weights)); + topology.add(shape_of("shape_of", input_info("input"), data_types::i32)); + topology.add(reshape("reshape", input_info("shape_of"), input_info("data_0"), false, {})); + topology.add(convolution("convolution", input_info("reshape"), "weights", "", 1, {1, 1}, {1, 1}, {0, 0}, {0, 0}, false)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + auto prog = network.get_program(); + ASSERT_NE(prog, nullptr); + + ASSERT_TRUE(check_subgraph(prog->get_node("shape_of"), prog->get_node("convolution"))); +} + +TEST(mark_shape_of_subgraphs, broadcast_w_data_and_direct_shapeof_no_mark) { + auto& engine = 
get_test_engine(); + auto input_layout_dynamic = layout{ov::PartialShape{ov::Dimension::dynamic(), 4, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f32, format::bfyx}; + auto data_0 = engine.allocate_memory({ ov::PartialShape{1}, data_types::i32, format::bfyx }); + set_values(data_0, {0}); + auto weights = engine.allocate_memory({ data_types::f16, format::bfyx, {1152, 4, 2, 2} }); + + topology topology; + topology.add(input_layout("input", input_layout_dynamic)); + topology.add(data("data_0", data_0)); + topology.add(shape_of("shape_of", input_info("input"), data_types::i32)); + topology.add(broadcast("broadcast", input_info("data_0"), input_info("shape_of"), {}, ov::op::BroadcastType::BIDIRECTIONAL)); + topology.add(data("weights", weights)); + topology.add(convolution("convolution", input_info("broadcast"), "weights", "", 1, {1, 1}, {1, 1}, {0, 0}, {0, 0}, false)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + auto prog = network.get_program(); + ASSERT_NE(prog, nullptr); + + ASSERT_FALSE(check_subgraph(prog->get_node("shape_of"), prog->get_node("convolution"))); + ASSERT_FALSE(check_subgraph(prog->get_node("shape_of"), prog->get_node("broadcast"))); +} + +TEST(mark_shape_of_subgraphs, broadcast_w_data_and_indirect_shapeof) { + auto& engine = get_test_engine(); + auto input_layout_dynamic = layout{ov::PartialShape{ov::Dimension::dynamic(), 4, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f32, format::bfyx}; + auto data_0 = engine.allocate_memory({ ov::PartialShape{1}, data_types::i32, format::bfyx }); + set_values(data_0, {0}); + + topology topology; + topology.add(input_layout("input", input_layout_dynamic)); + topology.add(data("data_0", data_0)); + topology.add(shape_of("shape_of", input_info("input"), data_types::i32)); + 
topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, 0, {})); + topology.add(broadcast("broadcast", input_info("data_0"), input_info("gather"), {}, ov::op::BroadcastType::BIDIRECTIONAL)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + auto prog = network.get_program(); + ASSERT_NE(prog, nullptr); + + ASSERT_TRUE(check_subgraph(prog->get_node("shape_of"), prog->get_node("broadcast"))); +} + +TEST(mark_shape_of_subgraphs, broadcast_w_direct_shapeof_and_data) { + auto& engine = get_test_engine(); + auto input_layout_dynamic = layout{ov::PartialShape{ov::Dimension::dynamic(), 4, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f32, format::bfyx}; + auto target_shape = engine.allocate_memory({ ov::PartialShape{4}, data_types::i32, format::bfyx }); + set_values(target_shape, {4, 4, 1, 1}); + + topology topology; + topology.add(input_layout("input", input_layout_dynamic)); + topology.add(data("target_shape", target_shape)); + topology.add(shape_of("shape_of", input_info("input"), data_types::i32)); + topology.add(broadcast("broadcast", input_info("shape_of"), input_info("target_shape"), {}, ov::op::BroadcastType::BIDIRECTIONAL)); + topology.add(reshape("reshape", input_info("input"), input_info("broadcast"), false, ov::PartialShape{4, 4, 1, 1})); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + auto prog = network.get_program(); + ASSERT_NE(prog, nullptr); + + ASSERT_TRUE(check_subgraph(prog->get_node("shape_of"), prog->get_node("broadcast"))); +} From 98add4f39d72f76f86c018bbb73bed0b9ec0deae Mon Sep 17 00:00:00 2001 From: Xiong Yuan Date: Mon, 23 Dec 2024 
10:26:33 +0800 Subject: [PATCH 49/60] [GPU] Enable can_use_fsv16 and can_use_bs_fs_yx_bsv16_fsv16 for group_normalization (#27986) ### Details: - *Enable can_use_fsv16 and can_use_bs_fs_yx_bsv16_fsv16 for group_normalization in pytorch version timm_mobilevitv2_150 OV model to get better performance* ### Tickets: - *[CVS-154913](https://jira.devtools.intel.com/browse/CVS-154913)* Signed-off-by: yuan.xiong --- src/plugins/intel_gpu/src/graph/program.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index bdffb9c4980722..b71b0cdc77e3e2 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -1501,6 +1501,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) { prim.type() != cldnn::strided_slice::type_id() && prim.type() != cldnn::region_yolo::type_id() && prim.type() != cldnn::normalize::type_id() && + prim.type() != cldnn::group_normalization::type_id() && prim.type() != cldnn::mvn::type_id() && prim.type() != cldnn::gather::type_id() && prim.type() != cldnn::scatter_nd_update::type_id() && @@ -1581,6 +1582,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) { prim.type() != cldnn::deconvolution::type_id() && prim.type() != cldnn::multiclass_nms::type_id() && prim.type() != cldnn::normalize::type_id() && + prim.type() != cldnn::group_normalization::type_id() && prim.type() != cldnn::deconvolution::type_id() && prim.type() != cldnn::unique_count::type_id() && prim.type() != cldnn::unique_gather::type_id() && From 9d4c1abcc84d58378567dcb635c0d5022317c05f Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Mon, 23 Dec 2024 09:28:33 +0400 Subject: [PATCH 50/60] [GPU] Extend the cases where layouts are compatible (#28136) ### Tickets: - *[159060](https://jira.devtools.intel.com/browse/CVS-159060)* --- src/plugins/intel_gpu/src/runtime/layout.cpp | 17 ++++++++++++++--- 
.../tests/unit/module_tests/layout_test.cpp | 4 ++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index a2b7e62ea0cae2..5c6c6dc83aeaea 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -446,8 +446,6 @@ bool layout::compatible(const layout& other) const { if (l1.is_dynamic() || l2.is_dynamic()) return false; - auto l1_size = l1.get_tensor(); - auto l2_size = l2.get_tensor(); if (l1 == l2) return true; if (check_redundant_1d_along_feature(l1, l2)) @@ -459,7 +457,7 @@ bool layout::compatible(const layout& other) const { if (format::is_default_format(l1.format) && format::is_default_format(l2.format) && !l1.data_padding && !l2.data_padding && l1.get_linear_size() == l2.get_linear_size()) return true; - if (l1_size != l2_size) + if (l1.get_shape() != l2.get_shape()) return false; if (l1.get_linear_size() != l2.get_linear_size()) return false; @@ -505,6 +503,19 @@ bool layout::compatible(const layout& other) const { auto l1_pitch = l1.get_pitches(); auto l2_pitch = l2.get_pitches(); + auto l1_padded_dims = l1.get_padded_dims(); + auto l2_padded_dims = l2.get_padded_dims(); + + // Ignore pitches which will never be used (for padded dims with size == 1) + for (size_t i = 0; i < l1_padded_dims.size(); ++i) { + if (l1_padded_dims[i] == 1) { + l1_pitch[i] = 0; + } + if (l2_padded_dims[i] == 1) { + l2_pitch[i] = 0; + } + } + auto l1_offset = l1.get_linear_offset(); auto l2_offset = l2.get_linear_offset(); if (l1_pitch == l2_pitch && l1_offset == l2_offset) diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/layout_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/layout_test.cpp index 7c666819176a13..279a86c73f55bf 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/layout_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/layout_test.cpp @@ -261,6 +261,10 @@ 
INSTANTIATE_TEST_SUITE_P(smoke, layout_cmp_test, layout{ov::PartialShape{4, 2, 3, 4, 5}, data_types::f16, format::is_os_zyx_isv16_osv16}, false, false}, {layout{ov::PartialShape{4, 2, 3, 4, 5}, data_types::f16, format::goiyx}, layout{ov::PartialShape{4, 2, 3, 4, 5}, data_types::f16, format::gioyx}, false, false}, + {layout{ov::PartialShape{4, 1, 16, 16}, data_types::f16, format::bfyx}, + layout{ov::PartialShape{4, 1, 16, 16}, data_types::f16, format::byxf}, false, true}, + {layout{ov::PartialShape{2, 1, 2, 4}, data_types::f16, format::bfyx, padding({0, 0, 1, 0}, {0, 0, 1, 0})}, + layout{ov::PartialShape{2, 1, 2, 4}, data_types::f16, format::bfyx, padding({0, 1, 0, 0}, {0, 0, 0, 0})}, false, false}, })); struct layouts_transform_test_params { From 9f17ebf2d8a580d51fe8dd10f54bd6e7ae9c351d Mon Sep 17 00:00:00 2001 From: Bo Liu Date: Mon, 23 Dec 2024 15:05:45 +0800 Subject: [PATCH 51/60] [CPU] add arithmetic_mode impl for bf16_emitters (#27737) ### Details: - *add arithmetic_mode impl for bf16_emitters to fix the 'inf' out issue when input data is out of rang [bf16_min,bf16_max] during f32->bf16, which may lead to 'UNK' outputs in some LLM cases* ### Tickets: - *CVS-157254* --- .../emitters/plugin/x64/jit_bf16_emitters.hpp | 34 +++++- src/plugins/intel_cpu/src/nodes/eltwise.cpp | 50 ++++++-- src/plugins/intel_cpu/src/nodes/eltwise.h | 1 + .../aarch64/jit_uni_eltwise_generic.hpp | 1 + .../src/x64/bf16_convert_saturation.cpp | 114 ++++++++++++++++++ 5 files changed, 188 insertions(+), 12 deletions(-) create mode 100644 src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/bf16_convert_saturation.cpp diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp index 2bfbaa68880aa8..6ad7d758b9ff07 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp @@ -11,13 +11,14 @@ 
namespace intel_cpu { class jit_uni_vcvtneps2bf16 : public jit_emitter { public: + enum class conversion_mode { default_mode, saturation_mode }; jit_uni_vcvtneps2bf16(dnnl::impl::cpu::x64::jit_generator* host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, - ov::element::Type exec_prc = ov::element::bf16) + ov::element::Type exec_prc = ov::element::bf16, + conversion_mode mode = conversion_mode::default_mode) : jit_emitter(host, host_isa, exec_prc) { - if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16) && - !dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2_vnni_2)) - prepare_table(); + prepare_table(); + mode_ = mode; } size_t get_inputs_num() const override { @@ -25,6 +26,7 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter { } private: + conversion_mode mode_ = conversion_mode::default_mode; void emit_impl(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const override { if (host_isa_ == dnnl::impl::cpu::x64::avx512_core) { emit_isa(in_vec_idxs, out_vec_idxs); @@ -44,6 +46,25 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter { conditional3::type; Vmm in = Vmm(in_vec_idxs[0]); + if (mode_ == conversion_mode::saturation_mode) { + Vmm vmm_temp = Vmm(out_vec_idxs[0]); + + h->uni_vmaxps(vmm_temp, in, table_val("bf16_min")); + h->uni_vminps(vmm_temp, vmm_temp, table_val("bf16_max")); + + if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) { + h->vfixupimmps(vmm_temp, in, table_val("selector"), 0); + } else { + Vmm mask = Vmm(aux_vec_idxs[0]); + h->uni_vcmpps(mask, in, in, 0x03); // _CMP_UNORD_Q + h->uni_vblendvps(vmm_temp, vmm_temp, table_val("nan"), mask); + h->uni_vcmpps(mask, in, table_val("inf"), 0x00); // _CMP_EQ_OQ + h->uni_vblendvps(vmm_temp, vmm_temp, table_val("inf"), mask); + h->uni_vcmpps(mask, in, table_val("neg_inf"), 0x00); // _CMP_EQ_OQ + h->uni_vblendvps(vmm_temp, vmm_temp, table_val("neg_inf"), mask); + } + h->uni_vmovups(in, vmm_temp); + } if 
(dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16)) { Ymm out = Ymm(out_vec_idxs[0]); @@ -119,6 +140,11 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter { push_arg_entry_of("rounding", 0x00010000, true); push_arg_entry_of("selector", selector_int32, true); push_arg_entry_of("mask_truncation_word", 0x0000ffff, true); + push_arg_entry_of("bf16_max", 0x7F7F0000, true); + push_arg_entry_of("bf16_min", 0xFF7F0000, true); + push_arg_entry_of("nan", 0x7FC00000, true); + push_arg_entry_of("inf", 0x7F800000, true); + push_arg_entry_of("neg_inf", 0xFF800000, true); } size_t aux_vecs_count() const override { diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/eltwise.cpp index 5daefa01eddfab..c2e770db84695b 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/eltwise.cpp @@ -341,8 +341,11 @@ struct jit_uni_eltwise_generic : public jit_uni_eltwise_kernel, public jit_gener reg_d_bias)); } - if (mayiuse(avx512_core) || mayiuse(avx2_vnni_2)) - uni_vcvtneps2bf16.reset(new jit_uni_vcvtneps2bf16(this, isa)); + if (mayiuse(avx512_core) || mayiuse(avx2_vnni_2)) { + auto const mode = jep_.do_output_saturation ? jit_uni_vcvtneps2bf16::conversion_mode::saturation_mode + : jit_uni_vcvtneps2bf16::conversion_mode::default_mode; + uni_vcvtneps2bf16.reset(new jit_uni_vcvtneps2bf16(this, isa, element::bf16, mode)); + } const auto& jep = jep_; @@ -478,7 +481,11 @@ struct jit_uni_eltwise_generic : public jit_uni_eltwise_kernel, public jit_gener apply_post_ops(true, jep_.oc_size > 1 ? 
j * sizeof(float) : 0); - store_scalar(ptr[reg_dst + j * jep.dst_prc.size()], xmm_dst, exec_prc, jep.dst_prc); + store_scalar(ptr[reg_dst + j * jep.dst_prc.size()], + xmm_dst, + exec_prc, + jep.dst_prc, + jep.do_output_saturation); } for (size_t i = 0; i < jep.inputs_number; i++) @@ -546,7 +553,7 @@ struct jit_uni_eltwise_generic : public jit_uni_eltwise_kernel, public jit_gener apply_post_ops(true); - store_scalar(ptr[reg_dst], xmm_dst, exec_prc, jep.dst_prc); + store_scalar(ptr[reg_dst], xmm_dst, exec_prc, jep.dst_prc, jep.do_output_saturation); for (size_t i = 0; i < jep.inputs_number; i++) if (jep.src_size[i] != 1) @@ -1012,7 +1019,8 @@ struct jit_uni_eltwise_generic : public jit_uni_eltwise_kernel, public jit_gener inline void store_scalar(const Xbyak::Address& op, Xmm xmm_dst, ov::element::Type src_prc, - ov::element::Type dst_prc) { + ov::element::Type dst_prc, + const bool do_output_saturation) { if (src_prc == dst_prc) { switch (src_prc.size()) { case 4: @@ -1047,7 +1055,11 @@ struct jit_uni_eltwise_generic : public jit_uni_eltwise_kernel, public jit_gener uni_vmovss(op, xmm_dst); break; case ov::element::bf16: - uni_vpsrld(xmm_dst, xmm_dst, 16); + if (do_output_saturation) + uni_vpsrld(xmm_dst, xmm_dst, 16); + else + uni_vcvtneps2bf16->emit_code({static_cast(xmm_dst.getIdx())}, + {static_cast(xmm_dst.getIdx())}); uni_vpextrw(op, xmm_dst, 0x0); break; case ov::element::f16: @@ -1355,6 +1367,7 @@ struct EltwiseKey { ov::element::Type outPrc; dnnl::post_ops postOps; EltwiseImplType implType; + bool doOutputSaturation; size_t hash() const { using namespace dnnl::impl; @@ -1390,6 +1403,10 @@ struct EltwiseKey { seed = hash_combine(seed, outPrc.hash()); seed = get_post_op_hash(seed, *postOps.get()); seed = hash_combine(seed, implType); + + if (outPrc == ov::element::bf16) { + seed = hash_combine(seed, doOutputSaturation); + } return seed; } @@ -1416,6 +1433,8 @@ struct EltwiseKey { result = result && (inpDims[i] == rhs.inpDims[i]); } } + if (doOutputSaturation 
!= rhs.doOutputSaturation) + return false; } return result; @@ -1448,7 +1467,8 @@ class EltwiseJitExecutor : public Eltwise::IEltwiseExecutor { const std::vector& inpPrc, const ov::element::Type& outPrc, const dnnl::post_ops& post_ops, - bool useRuntimePtrs) { + bool useRuntimePtrs, + bool doOutputSaturation) { auto collapseLastDims = [](std::vector& dims, int dimsToCollapse) { for (size_t i = dims.size() - 2; i > dims.size() - dimsToCollapse - 2; i--) { dims[dims.size() - 1] *= dims[i]; @@ -1639,6 +1659,7 @@ class EltwiseJitExecutor : public Eltwise::IEltwiseExecutor { jep.dst_prc = outPrc; jep.work_amount = jep.dst_size = jep.dims.back(); jep.oc_size = oc_size; + jep.do_output_saturation = doOutputSaturation; std::transform(jep.oc_offsets.begin(), jep.oc_offsets.end(), jep.oc_offsets.begin(), [](size_t& offset) { return offset * sizeof(float); @@ -2160,7 +2181,8 @@ static Eltwise::executorPtr buildExecutor(const EltwiseKey& key) { key.inpPrc, key.outPrc, key.postOps, - key.implType == EltwiseImplType::optimizedShapeAgnostic); + key.implType == EltwiseImplType::optimizedShapeAgnostic, + key.doOutputSaturation); } bool Eltwise::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { @@ -2862,6 +2884,18 @@ void Eltwise::prepareParams() { } } + // FP32 constant inputs may contain values out of BF16 representable range. In case output precision is BF16 we + // choose "saturation" mode for fp32->bf16 conversion procedure to prevent getting -Inf/+Inf values in the + // outputs. Since "saturation" conversion is more time consuming, better solution would be to clamp constants on + // compilation stage (ticket: 159589). 
+ key.doOutputSaturation = false; + for (size_t i = 0; i < getParentEdges().size(); i++) { + if (getParentEdgeAt(i)->getParent()->isConstant()) { + key.doOutputSaturation = true; + break; + } + } + auto cache = context->getParamsCache(); auto result = cache->getOrCreate(key, buildExecutor); execPtr = result.first; diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.h b/src/plugins/intel_cpu/src/nodes/eltwise.h index d0ca94e08824c8..8e5fd643665ffd 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.h +++ b/src/plugins/intel_cpu/src/nodes/eltwise.h @@ -43,6 +43,7 @@ struct jit_eltwise_params { size_t work_amount; bool use_runtime_ptrs; + bool do_output_saturation; }; struct jit_eltwise_call_args_indexes { diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp index 1bf64d096e4a84..c4fb7608d521de 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp @@ -57,6 +57,7 @@ struct jit_eltwise_params { size_t work_amount; bool use_runtime_ptrs; + bool do_output_saturation; }; struct jit_eltwise_call_args_indexes { diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/bf16_convert_saturation.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/bf16_convert_saturation.cpp new file mode 100644 index 00000000000000..96c08eeffed15a --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/bf16_convert_saturation.cpp @@ -0,0 +1,114 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "utils/fusing_test_utils.hpp" + +using namespace CPUTestUtils; +namespace ov { +namespace test { +/* + This test aims to cover Eltwise node 
BF16 output precision conversion logic in "saturation" mode. In this test, we + have a select node with condition input of boolean type and then/else inputs of f32 type(as constant node with bf16 + overflow data). The select node is followed by a convolution node to ensoure that it is converted to bf16 precision. +*/ +using selectParams = std::tuple; +class BF16ConvertSaturation : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CpuTestWithFusing { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + InputShape shapes; + ElementType precision; + std::tie(shapes, precision) = obj.param; + + std::ostringstream result; + result << "Condition_prc_" << ElementType::boolean << "_Then_Else_prc_" << precision << "_"; + result << "IS=(" << shapes.first << ")_TS=("; + for (const auto& item : shapes.second) { + result << ov::test::utils::vec2str(item) << "_"; + } + result << "PluginConf_inference_precision=bf16"; + + return result.str(); + } + +protected: + void SetUp() override { + abs_threshold = 0; + targetDevice = ov::test::utils::DEVICE_CPU; + InputShape shapes; + ElementType precision; + std::tie(shapes, precision) = this->GetParam(); + init_input_shapes({shapes}); + std::tie(inFmts, outFmts, priority, selectedType) = emptyCPUSpec; + selectedType = makeSelectedTypeStr(getPrimitiveType(), ov::element::i8); + ov::element::TypeVector types{ov::element::boolean, precision, precision}; + ov::ParameterVector parameters; + auto param = std::make_shared(ov::element::boolean, inputDynamicShapes[0]); + parameters.push_back(param); + + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -3.40282e+38; + in_data.range = 1; + in_data.resolution = 1; + auto thenTensor = ov::test::utils::create_and_fill_tensor(precision, ov::Shape{1}, in_data); + + in_data.start_from = 3.40282e+38; + in_data.range = 10; + in_data.resolution = 2; + auto elseTensor = ov::test::utils::create_and_fill_tensor(precision, ov::Shape{2, 1, 
32, 32}, in_data); + + auto select = std::make_shared(parameters[0], + std::make_shared(thenTensor), + std::make_shared(elseTensor), + ov::op::AutoBroadcastType::NUMPY); + + auto conv_filter_shape = ov::Shape{1, 1, 3, 3}; + auto conv_filter = ov::op::v0::Constant::create(ElementType::f32, conv_filter_shape, {1}); + auto strides = ov::Strides{1, 1}; + auto pads_begin = ov::CoordinateDiff{0, 0}; + auto pads_end = ov::CoordinateDiff{0, 0}; + auto dilations = ov::Strides{1, 1}; + auto conv = + std::make_shared(select, conv_filter, strides, pads_begin, pads_end, dilations); + + function = makeNgraphFunction(ElementType::f32, parameters, conv, "Eltwise"); + configuration.insert({ov::hint::inference_precision(ov::element::bf16)}); + } + + void generate_inputs(const std::vector& targetInputStaticShapes) override { + inputs.clear(); + const auto& modelInputs = function->inputs(); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 3; + in_data.resolution = 2; + auto condTensor = ov::test::utils::create_and_fill_tensor(modelInputs[0].get_element_type(), + targetInputStaticShapes[0], + in_data); + + inputs.insert({modelInputs[0].get_node_shared_ptr(), condTensor}); + } +}; + +TEST_P(BF16ConvertSaturation, CompareWithRefs) { + run(); +} + +const std::vector inShapes = { + // Condition + {{-1, -1, -1, -1}, {{2, 1, 32, 32}}}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BF16ConvertSaturationTest, + BF16ConvertSaturation, + ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::Values(ElementType::f32)), + BF16ConvertSaturation::getTestCaseName); + +} // namespace test +} // namespace ov \ No newline at end of file From f93d051f24cbf40d2d5d6f1f24a1a780573dfc2b Mon Sep 17 00:00:00 2001 From: Paul Youngsoo Ahn Date: Mon, 23 Dec 2024 17:28:35 +0900 Subject: [PATCH 52/60] Add missing code in dynamic fc impl (#28026) ### Details: - *Add acc_tmp in general calc in fc funcion in common include file* ### Tickets: - *158460* --- 
.../fully_connected_gpu_bf_tiled.cl | 4 +++- .../fully_connected_gpu_bf_tiled_common.cl | 19 ++++++++++++------- .../test_cases/fully_connected_gpu_test.cpp | 4 ++++ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl index 01c8e8853e350d..6a5c9e54a8e904 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl @@ -601,8 +601,10 @@ inline void FUNC(fc_bf_tiled_kernel_default)( #endif #if TILE_OFM > 1 ((ACCUMULATOR_TYPE*)(&acc[bi]))[fi] += ((ACCUMULATOR_TYPE*)(&acc_tmp[bi]))[fi] * ds; + acc_tmp[bi][fi] = 0; #else acc[bi] += acc_tmp[bi] * ds; + acc_tmp[bi] = 0; #endif } } @@ -972,7 +974,7 @@ inline void FUNC(fc_bf_tiled_kernel_dyn_quan)( // ===================================================================================================================================== // Main computation loop const uint iterations = MAIN_LOOP_ELEMENTS_COUNT / TILE_IFM_ELEMENTS_SIZE; // TILE_IFM_ELEMENTS_SIZE : (TILE_IFM * SIMD) - // Each sub-group loads 2 Batch + // Each sub-group loads 2 Batch uint idx_sglid = (sglid * TILE_K) % TILE_IFM_ELEMENTS_SIZE; // same index for sglid 0~7 : to tile_k direction uint batch_sglid = (sglid * TILE_K) / TILE_IFM_ELEMENTS_SIZE; // 0 to 1 : to batch direction diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/fully_connected_gpu_bf_tiled_common.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/fully_connected_gpu_bf_tiled_common.cl index ca5c1ea3646d02..3f5796a30933ac 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/fully_connected_gpu_bf_tiled_common.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/fully_connected_gpu_bf_tiled_common.cl @@ -147,9 +147,7 
@@ inline void (FUNC_NAME)( // NOTE: Manually unrolling multiplication loop leads to lower register pressure and allows for bigger block sizes, // but significantly degrades readability and generality of code. // It doesn't also show noticable performance improvement on tested configurations. - #if DECOMPRESSION_SCALE_POST_OP - ACCUMULATOR_VEC_TYPE acc_tmp[FORCED_TILE_B] = { }; - #endif + ACCUMULATOR_VEC_TYPE acc_tmp[FORCED_TILE_B] = { }; unroll_for(uint ki = 0; ki < (TILE_IFM * SIMD) / TILE_K; ++ki) { #if COMPRESSED_WEIGHTS_INT4 @@ -201,11 +199,7 @@ inline void (FUNC_NAME)( unroll_for (uint bi = 0; bi < FORCED_TILE_B; ++bi) { INPUT0_TYPE in_val = _sub_group_shuffle(((INPUT0_TYPE*)(&in_0[bi]))[total_k / SIMD], total_k % SIMD); unroll_for (uint fi = 0; fi < TILE_OFM; ++fi) { -#if DECOMPRESSION_SCALE_POST_OP ((ACCUMULATOR_TYPE*)(&acc_tmp[bi]))[fi] += in_val * ((ACCUMULATOR_TYPE*)(&wei))[W_IDX]; -#else - ((ACCUMULATOR_TYPE*)(&acc[bi]))[fi] += in_val * ((ACCUMULATOR_TYPE*)(&wei))[W_IDX]; -#endif } } } @@ -240,9 +234,20 @@ inline void (FUNC_NAME)( ACCUMULATOR_TYPE ds = d_scales[fi % DECOMPRESSION_SCALE_LENGTH]; #endif ((ACCUMULATOR_TYPE*)(&acc[bi]))[fi] += ((ACCUMULATOR_TYPE*)(&acc_tmp[bi]))[fi] * ds; + acc_tmp[bi][fi] = 0; } } #endif + +#if !DECOMPRESSION_SCALE_POST_OP + unroll_for (uint bi = 0; bi < FORCED_TILE_B; ++bi) { + unroll_for(uint fi = 0; fi < TILE_OFM; ++fi) { + ((ACCUMULATOR_TYPE*)(&acc[bi]))[fi] += ((ACCUMULATOR_TYPE*)(&acc_tmp[bi]))[fi]; + } + } +#endif + + } // ===================================================================================================================================== // Leftovers diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp index f59dc5c42cffc1..c3caebe9d0ba68 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp +++ 
b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp @@ -4137,6 +4137,10 @@ TEST_F(fully_connected_gpu_tests, compressed_int4_scale_dyn_quan_dynamic_f_input this->test_compressed_int4_scale_dyn_quan(false, true, 511, true); } +TEST_F(fully_connected_gpu_tests, compressed_int4_scale_dynamic_quantize_batch_1) { + this->test_compressed_int4_scale_dyn_quan_weight_i4(true, 1, 2048, 3072); +} + TEST_F(fully_connected_gpu_tests, compressed_int4_scale_dynamic_quantize_edge_case) { this->test_compressed_int4_scale_dyn_quan_weight_i4(true, 359, 1536, 2560); } From 8ee6f407a9993db0c1d8d9347531bec013d53703 Mon Sep 17 00:00:00 2001 From: Paul Youngsoo Ahn Date: Mon, 23 Dec 2024 17:29:06 +0900 Subject: [PATCH 53/60] [GPU] Implement fake_convert (#28065) ### Details: - *implement fake_convert* - *add functional test for fake convert* ### Tickets: - *159263* --- src/core/include/openvino/op/fake_convert.hpp | 1 + src/core/src/op/fake_convert.cpp | 4 + .../intel_gpu/plugin/primitives_list.hpp | 1 + .../intel_gpu/primitives/fake_convert.hpp | 68 +++++++++ .../intel_gpu/src/graph/fake_convert.cpp | 72 +++++++++ .../src/graph/impls/cpu/fake_convert.cpp | 131 ++++++++++++++++ .../src/graph/impls/cpu/register.cpp | 1 + .../src/graph/impls/cpu/register.hpp | 1 + .../impls/registry/fake_convert_impls.cpp | 24 +++ .../src/graph/impls/registry/registry.hpp | 1 + .../src/graph/include/fake_convert_inst.h | 55 +++++++ .../intel_gpu/src/plugin/ops/fake_convert.cpp | 39 +++++ .../single_layer_tests/fake_convert.cpp | 141 ++++++++++++++++++ .../unit/module_tests/impls_registry_test.cpp | 4 +- 14 files changed, 542 insertions(+), 1 deletion(-) create mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/fake_convert.hpp create mode 100644 src/plugins/intel_gpu/src/graph/fake_convert.cpp create mode 100644 src/plugins/intel_gpu/src/graph/impls/cpu/fake_convert.cpp create mode 100644 src/plugins/intel_gpu/src/graph/impls/registry/fake_convert_impls.cpp create mode 
100644 src/plugins/intel_gpu/src/graph/include/fake_convert_inst.h create mode 100644 src/plugins/intel_gpu/src/plugin/ops/fake_convert.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/single_layer_tests/fake_convert.cpp diff --git a/src/core/include/openvino/op/fake_convert.hpp b/src/core/include/openvino/op/fake_convert.hpp index c3eaa43b98a51b..16ef7a0337c15b 100644 --- a/src/core/include/openvino/op/fake_convert.hpp +++ b/src/core/include/openvino/op/fake_convert.hpp @@ -68,6 +68,7 @@ class OPENVINO_API FakeConvert : public Op { bool has_evaluate() const override; std::string get_destination_type() const; + void set_destination_type(ov::element::Type destination_type); const ov::element::Type& get_destination_element_type() const; private: diff --git a/src/core/src/op/fake_convert.cpp b/src/core/src/op/fake_convert.cpp index 5b3c8f8d8e9938..517674402ef872 100644 --- a/src/core/src/op/fake_convert.cpp +++ b/src/core/src/op/fake_convert.cpp @@ -79,6 +79,10 @@ std::string FakeConvert::get_destination_type() const { return m_destination_type.get_type_name(); } +void FakeConvert::set_destination_type(ov::element::Type destination_type) { + m_destination_type = destination_type; +} + const ov::element::Type& FakeConvert::get_destination_element_type() const { return m_destination_type; } diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index c7524f1880157d..0950614897ab43 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -267,6 +267,7 @@ REGISTER_FACTORY(v13, ScaledDotProductAttention); REGISTER_FACTORY(v13, BitwiseAnd); REGISTER_FACTORY(v13, BitwiseOr); REGISTER_FACTORY(v13, BitwiseXor); +REGISTER_FACTORY(v13, FakeConvert); // ------------------------------ Supported v15 ops ----------------------------- // REGISTER_FACTORY(v15, ROIAlignRotated); diff 
--git a/src/plugins/intel_gpu/include/intel_gpu/primitives/fake_convert.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/fake_convert.hpp new file mode 100644 index 00000000000000..c16af0be51abda --- /dev/null +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/fake_convert.hpp @@ -0,0 +1,68 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "primitive.hpp" +#include + +namespace cldnn { + +/// @brief FakeConvert performs element-wise quantization of input values +/// into a set of values corresponding to a target low-precision type. +struct fake_convert : public primitive_base { + CLDNN_DECLARE_PRIMITIVE(fake_convert) + + fake_convert() : primitive_base("", {}) {} + + /// @brief Constructs fake_convert primitive. + /// @param id This primitive id. + /// @param input Input primitive id. + /// @param scale Scale primitive id. + /// @param shift Shift primitive id. + /// @param destination_type The low precision type to be emulated. + fake_convert(const primitive_id& id, + const input_info& input, + const input_info& scale, + const input_info& shift, + ov::element::Type destination_type = ov::element::Type_t::f8e4m3) + : primitive_base(id, {input, scale, shift}, 1), destination_type(destination_type) {} + + /// @brief Constructs fake_convert primitive. + /// @param id This primitive id. + /// @param input Input primitive id. + /// @param scale Scale primitive id. + /// @param shift Shift primitive id. + /// @param destination_type The low precision type to be emulated. 
+ fake_convert(const primitive_id& id, + const input_info& input, + const input_info& scale, + ov::element::Type destination_type = ov::element::Type_t::f8e4m3) + : primitive_base(id, {input, scale}, 1), destination_type(destination_type) {} + + ov::element::Type destination_type; + + size_t hash() const override { + size_t seed = primitive::hash(); + seed = hash_combine(seed, destination_type.get_type_name()); + return seed; + } + + bool operator==(const primitive& rhs) const override { + if (!compare_common_params(rhs)) + return false; + auto rhs_casted = downcast(rhs); + return (destination_type == rhs_casted.destination_type); + } + + void save(BinaryOutputBuffer& ob) const override { + primitive_base::save(ob); + ob << make_data(&destination_type, sizeof(destination_type)); + } + + void load(BinaryInputBuffer& ib) override { + primitive_base::load(ib); + ib >> make_data(&destination_type, sizeof(destination_type)); + } +}; +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/fake_convert.cpp b/src/plugins/intel_gpu/src/graph/fake_convert.cpp new file mode 100644 index 00000000000000..b201378d52cc8d --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/fake_convert.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fake_convert_inst.h" +#include "fake_convert_shape_inference.hpp" + +#include "primitive_type_base.h" +#include "intel_gpu/runtime/error_handler.hpp" +#include "json_object.h" +#include + +namespace cldnn { +GPU_DEFINE_PRIMITIVE_TYPE_ID(fake_convert) + +layout fake_convert_inst::calc_output_layout(fake_convert_node const& node, kernel_impl_params const& impl_param) { + return calc_output_layouts(node, impl_param)[0]; +} + +template +std::vector fake_convert_inst::calc_output_layouts(fake_convert_node const& node, kernel_impl_params const& impl_param) { + const auto& input_layout = impl_param.get_input_layout(0); + auto output_type = 
ov::element::Type(input_layout.data_type); + + OPENVINO_ASSERT(ov::element::Type::merge(output_type, output_type, ov::element::Type(impl_param.get_input_layout(1).data_type)), + "Mixed input types are not supported."); + + if (impl_param.input_layouts.size() == 3) { + OPENVINO_ASSERT(ov::element::Type::merge(output_type, output_type, ov::element::Type(impl_param.get_input_layout(2).data_type)), + "Mixed input types are not supported."); + } + + switch (output_type) { + case ov::element::bf16: + case ov::element::f16: + case ov::element::f32: + break; + default: + OPENVINO_THROW("The output data type should be a bf16, f16, f32 but got: ", output_type); + } + + return { layout{input_layout.get_partial_shape(), output_type, input_layout.format} }; +} + +template std::vector fake_convert_inst::calc_output_layouts(fake_convert_node const& node, const kernel_impl_params& impl_param); + +std::string fake_convert_inst::to_string(fake_convert_node const& node) { + auto desc = node.get_primitive(); + auto node_info = node.desc_to_json(); + auto& input = node.input(); + auto& scale = node.scale(); + + std::stringstream primitive_description; + + json_composite fake_convert_info; + fake_convert_info.add("input id", input.id()); + fake_convert_info.add("scale id", scale.id()); + if (node.has_shift()) { + fake_convert_info.add("shift id", node.shift().id()); + } + fake_convert_info.add("destination_type", node.get_destination_type().get_type_name()); + + node_info->add("fake_convert info", fake_convert_info); + node_info->dump(primitive_description); + + return primitive_description.str(); +} + +fake_convert_inst::typed_primitive_inst(network& network, fake_convert_node const& node) + : parent(network, node) {} + +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/fake_convert.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/fake_convert.cpp new file mode 100644 index 00000000000000..a5f94741c40bf5 --- /dev/null +++ 
b/src/plugins/intel_gpu/src/graph/impls/cpu/fake_convert.cpp @@ -0,0 +1,131 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "impls/cpu/cpu_impl_helpers.hpp" +#include "register.hpp" +#include "fake_convert_inst.h" +#include "impls/registry/implementation_map.hpp" + +#include "openvino/op/fake_convert.hpp" + +namespace cldnn { +namespace cpu { + +struct fake_convert_impl : public typed_primitive_impl { + using parent = typed_primitive_impl; + using parent::parent; + + ov::element::Type destination_type; + + std::shared_ptr op; + + DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::cpu::fake_convert_impl) + + std::unique_ptr clone() const override { + return make_unique(*this); + } + + fake_convert_impl() : parent("fake_convert_cpu_impl") {} + + explicit fake_convert_impl(const fake_convert_node& outer) { + set_node_params(outer); + } + + void set_node_params(const program_node& arg) override { + OPENVINO_ASSERT(arg.is_type(), "[GPU] Incorrect program_node type"); + const auto& node = arg.as(); + destination_type = node.get_destination_type(); + } + + void save(BinaryOutputBuffer& ob) const override { + parent::save(ob); + ob << make_data(&destination_type, sizeof(destination_type)); + } + + void load(BinaryInputBuffer& ib) override { + parent::load(ib); + ib >> make_data(&destination_type, sizeof(destination_type)); + } + + event::ptr execute_impl(const std::vector& events, fake_convert_inst& instance) override { + OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, "fake_convert::execute_impl"); + auto& stream = instance.get_network().get_stream(); + + const bool pass_through_events = (stream.get_queue_type() == QueueTypes::out_of_order) && instance.all_dependencies_cpu_impl(); + + if (!pass_through_events) { + stream.wait_for_events(events); + } + + auto params = instance.get_impl_params(); + + ov::TensorVector input_host_tensors; + ov::TensorVector output_host_tensors; + + if (!op) { + op = 
std::make_shared(); + op->set_destination_type(destination_type); + } + + std::vector input_mem_ptrs; + for (size_t i = 0; i < instance.dependencies().size(); i++) + input_mem_ptrs.push_back(instance.dep_memory_ptr(i)); + + auto output_mem_ptr = instance.output_memory_ptr(); + + cldnn::mem_lock output_lock(output_mem_ptr, stream); + + for (size_t i = 0; i < input_mem_ptrs.size(); i++) + input_host_tensors.push_back(make_tensor(params->input_layouts[i], input_mem_ptrs[i]->lock(stream, mem_lock_type::read))); + + output_host_tensors.push_back(make_tensor(params->output_layouts[0], output_lock.data())); + + OPENVINO_ASSERT(op->evaluate(output_host_tensors, input_host_tensors), + "[GPU] Couldn't execute fake_convert primitive with id ", instance.id()); + + if (pass_through_events) { + return stream.group_events(events); + } + + return make_output_event(stream, instance.is_output()); + } + + void init_kernels(const kernels_cache& , const kernel_impl_params&) override {} + + void update(primitive_inst& inst, const kernel_impl_params& impl_param) override {} + +public: + static std::unique_ptr create(const fake_convert_node& arg, const kernel_impl_params& impl_param) { + return make_unique(); + } +}; + + +namespace detail { + +attach_fake_convert_impl::attach_fake_convert_impl() { + auto formats = { + format::bfyx, + format::bfzyx, + format::bfwzyx, + format::bfuwzyx, + format::bfvuwzyx, + }; + + auto types = { + data_types::f32, + data_types::f16, + data_types::bf16 + }; + + implementation_map::add(impl_types::cpu, shape_types::static_shape, fake_convert_impl::create, types, formats); + implementation_map::add(impl_types::cpu, shape_types::dynamic_shape, fake_convert_impl::create, types, formats); +} + +} // namespace detail +} // namespace cpu +} // namespace cldnn + +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::cpu::fake_convert_impl) +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::fake_convert) diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/register.cpp 
b/src/plugins/intel_gpu/src/graph/impls/cpu/register.cpp index 2b0dc5b212158c..e86628444de439 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/register.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/register.cpp @@ -31,6 +31,7 @@ void register_implementations() { REGISTER_CPU(tile); REGISTER_CPU(select); REGISTER_CPU(reduce); + REGISTER_CPU(fake_convert); } } // namespace cpu diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/register.hpp b/src/plugins/intel_gpu/src/graph/impls/cpu/register.hpp index cb89eae29d8c56..15cc4b11c077eb 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/register.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/register.hpp @@ -56,6 +56,7 @@ REGISTER_CPU(broadcast); REGISTER_CPU(tile); REGISTER_CPU(select); REGISTER_CPU(reduce); +REGISTER_CPU(fake_convert); #undef REGISTER_CPU diff --git a/src/plugins/intel_gpu/src/graph/impls/registry/fake_convert_impls.cpp b/src/plugins/intel_gpu/src/graph/impls/registry/fake_convert_impls.cpp new file mode 100644 index 00000000000000..991ab5aa12657a --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/impls/registry/fake_convert_impls.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "registry.hpp" +#include "intel_gpu/primitives/fake_convert.hpp" +#include "primitive_inst.h" + +namespace ov { +namespace intel_gpu { + +using namespace cldnn; + +const std::vector>& Registry::get_implementations() { + static const std::vector> impls = { + OV_GPU_GET_INSTANCE_CPU(fake_convert, shape_types::static_shape) + OV_GPU_GET_INSTANCE_CPU(fake_convert, shape_types::dynamic_shape) + }; + + return impls; +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp b/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp index f45d0897f01363..b2778233f41e64 100644 --- a/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp +++ 
b/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp @@ -145,6 +145,7 @@ REGISTER_IMPLS(scatter_elements_update); REGISTER_IMPLS(shape_of); REGISTER_IMPLS(strided_slice); REGISTER_IMPLS(tile); +REGISTER_IMPLS(fake_convert); REGISTER_DEFAULT_IMPLS(assign, CPU_S, CPU_D); REGISTER_DEFAULT_IMPLS(read_value, CPU_S, CPU_D); diff --git a/src/plugins/intel_gpu/src/graph/include/fake_convert_inst.h b/src/plugins/intel_gpu/src/graph/include/fake_convert_inst.h new file mode 100644 index 00000000000000..d86c565a5e6b2e --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/include/fake_convert_inst.h @@ -0,0 +1,55 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "intel_gpu/primitives/fake_convert.hpp" +#include "primitive_inst.h" + +#include +#include + +namespace cldnn { + +template <> +struct typed_program_node : public typed_program_node_base { + using parent = typed_program_node_base; + typed_program_node(const std::shared_ptr prim, program& prog) + : parent(prim, prog), destination_type(prim->destination_type) { + support_padding_all(true); + } + +public: + using parent::parent; + + program_node& input() const { return get_dependency(0); } + program_node& scale() const { return get_dependency(1); } + program_node& shift() const { return get_dependency(2); } + bool has_shift() const { return (get_dependencies().size() == 3); } + + ov::element::Type get_destination_type() const { return destination_type; } + + std::vector get_shape_infer_dependencies() const override { return {}; } + +private: + ov::element::Type destination_type; +}; + +using fake_convert_node = typed_program_node; + +template <> +class typed_primitive_inst : public typed_primitive_inst_base { + using parent = typed_primitive_inst_base; + using parent::parent; + +public: + template + static std::vector calc_output_layouts(fake_convert_node const& /*node*/, const kernel_impl_params& impl_param); + static layout 
calc_output_layout(fake_convert_node const& node, kernel_impl_params const& impl_param); + static std::string to_string(fake_convert_node const& node); + + typed_primitive_inst(network& network, fake_convert_node const& node); +}; + +using fake_convert_inst = typed_primitive_inst; +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/plugin/ops/fake_convert.cpp b/src/plugins/intel_gpu/src/plugin/ops/fake_convert.cpp new file mode 100644 index 00000000000000..282a483deab189 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/ops/fake_convert.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "intel_gpu/plugin/program_builder.hpp" +#include "intel_gpu/plugin/common_utils.hpp" + +#include "openvino/op/fake_convert.hpp" + +#include "intel_gpu/primitives/fake_convert.hpp" + +namespace ov { +namespace intel_gpu { +static void CreateFakeConvertOp(ProgramBuilder& p, const std::shared_ptr& op) { + validate_inputs_count(op, {2, 3}); + const auto inputs = p.GetInputInfo(op); + const std::string layerName = layer_type_name_ID(op); + ov::element::Type destination_type = op->get_destination_element_type(); + std::shared_ptr fake_convert_prim = nullptr; + if (inputs.size() == 2) { + fake_convert_prim = std::make_shared(layerName, + inputs[0], + inputs[1], + destination_type); + } else { + fake_convert_prim = std::make_shared(layerName, + inputs[0], + inputs[1], + inputs[2], + destination_type); + } + + p.add_primitive(*op, fake_convert_prim); +} + +REGISTER_FACTORY_IMPL(v13, FakeConvert); + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/fake_convert.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/fake_convert.cpp new file mode 100644 index 00000000000000..d1236f5c524421 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/fake_convert.cpp @@ -0,0 +1,141 @@ +// Copyright (C) 2024 Intel Corporation 
+// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/file_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/fake_convert.hpp" + +namespace { + +namespace fp8 { +constexpr float MAX_F8E4M3 = 448.f; +constexpr float MAX_F8E5M2 = 57344.f; +} // namespace fp8 + +using namespace std; +using namespace ov; +using namespace testing; +using ov::test::InputShape; + +using FakeConvertTestParams = std::tuple< + ov::Shape, // Input shapes + ov::Shape, // Scale shape + ov::Shape, // Shift shape + ov::element::Type, // input precision + ov::element::Type, // destination type + std::string >; // device name + +class FakeConvertTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + ov::Shape input_shape; + ov::Shape scale_shape; + ov::Shape shift_shape; + ov::element::Type prec; + ov::element::Type destination_type; + std::string target_device; + + std::tie(input_shape, scale_shape, shift_shape, prec, destination_type, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + result << ov::test::utils::vec2str(input_shape) << "_"; + result << "scale_shape=" << ov::test::utils::vec2str(scale_shape) << "_"; + result << "shift_shape=" << ov::test::utils::vec2str(shift_shape) << "_"; + result << "input_precision=" << prec << "_"; + result << "destination_type=" << destination_type << "_"; + result << "device_type=" << target_device; + return result.str(); + } + +protected: + ov::Shape input_shape, scale_shape, shift_shape; + ov::element::Type destination_type; + + void SetUp() override { + ov::element::Type prec; + std::tie(input_shape, scale_shape, shift_shape, prec, destination_type, targetDevice) = GetParam(); + const float 
MAX_FP8 = (destination_type == ov::element::f8e4m3) ? fp8::MAX_F8E4M3 : fp8::MAX_F8E5M2; + if (shift_shape.empty()) { + auto data = make_shared(prec, input_shape); + auto scale = op::v0::Constant::create(prec, + scale_shape, + {MAX_FP8 / (MAX_FP8 / 2.f), + 1.0f, + MAX_FP8 / (MAX_FP8 * 3.5f), + MAX_FP8 / (MAX_FP8 * 4.f)}); + + auto op = make_shared(data, scale, destination_type); + + function = make_shared(OutputVector{op}, ParameterVector{data}); + } else { + auto data = make_shared(prec, input_shape); + auto scale = op::v0::Constant::create(prec, + scale_shape, + {MAX_FP8 / (MAX_FP8 / 2.f), + 1.0f, + MAX_FP8 / (MAX_FP8 * 3.5f), + MAX_FP8 / (MAX_FP8 * 4.f)}); + auto shift = op::v0::Constant::create(prec, shift_shape, {0.f, 0.f, 0.f, 0.f}); + + auto op = make_shared(data, scale, shift, destination_type); + + function = make_shared(OutputVector{op}, ParameterVector{data}); + } + } + + void generate_inputs(const std::vector& target_shapes) override { + inputs.clear(); + const float MAX_FP8 = (destination_type == ov::element::f8e4m3) ? 
fp8::MAX_F8E4M3 : fp8::MAX_F8E5M2; + const auto& func_inputs = function->inputs(); + auto& data_input = func_inputs[0]; + ov::Tensor tensor = ov::Tensor(data_input.get_element_type(), target_shapes[0]); + std::vector input_data{MAX_FP8 / 4.f, + MAX_FP8 / 3.f, + MAX_FP8 / 2.f, + MAX_FP8, + MAX_FP8, + MAX_FP8, + MAX_FP8 * 1.2f, + MAX_FP8 * 2.3f, + MAX_FP8 * 3.4f, + MAX_FP8 * 2.f, + MAX_FP8 * 3.f, + MAX_FP8 * 4.f}; + auto* data_ptr = tensor.data(); + for (size_t i = 0; i < input_data.size(); i++) { + data_ptr[i] = input_data[i]; + } + inputs.insert({data_input.get_node_shared_ptr(), tensor}); + } +}; + +TEST_P(FakeConvertTest, Inference) { + run(); +} + +const std::vector input_precisions = {ov::element::f32}; + +const std::vector input_shapes = {{4, 3}}; + +const ov::Shape scale_shape = {4, 1}; +const std::vector shift_shapes = {{4, 1}, {}}; +const std::vector destination_types = {ov::element::f8e4m3, ov::element::f8e5m2}; + +INSTANTIATE_TEST_SUITE_P(Smoke_FakeConvertTest, + FakeConvertTest, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::Values(scale_shape), + ::testing::ValuesIn(shift_shapes), + ::testing::ValuesIn(input_precisions), + ::testing::ValuesIn(destination_types), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + FakeConvertTest::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/impls_registry_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/impls_registry_test.cpp index a16cd20846a1c7..5dfc450e43905a 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/impls_registry_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/impls_registry_test.cpp @@ -85,6 +85,7 @@ #include "intel_gpu/primitives/swiglu.hpp" #include "intel_gpu/primitives/tile.hpp" #include "intel_gpu/primitives/unique.hpp" +#include "intel_gpu/primitives/fake_convert.hpp" #include "primitive_inst.h" #include "test_utils.h" @@ -226,5 +227,6 @@ TEST(registry_test, no_null_impls) { cldnn::unique_count, 
cldnn::unique_gather, cldnn::scaled_dot_product_attention, - cldnn::rope>(); + cldnn::rope, + cldnn::fake_convert>(); } From 5fc16c8bf3a0693d6aafd1b5ce7a2bf050db1a36 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 23 Dec 2024 14:33:39 +0400 Subject: [PATCH 54/60] [PT FE][DOCS] Document conversion of PyTorch models from disk (#28175) **Details:** Document conversion of PyTorch models from disk **Ticket:** TBD --------- Signed-off-by: Kazantsev, Roman --- .../convert-model-pytorch.rst | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst b/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst index 6ac806daf0cda0..62cfdf05f2b11f 100644 --- a/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst +++ b/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst @@ -203,6 +203,52 @@ Here is an example of how to convert a model obtained with ``torch.export``: This is an experimental feature. Use it only if you know that you need to. PyTorch version 2.2 is recommended. Dynamic shapes are not supported yet. +Converting a PyTorch Model from Disk +#################################### + +PyTorch provides the capability to save models in two distinct formats: ``torch.jit.ScriptModule`` and ``torch.export.ExportedProgram``. +Both formats can be saved to disk as standalone files, enabling them to be reloaded independently of the original Python code. + +ExportedProgram Format +++++++++++++++++++++++ + +The ``ExportedProgram`` format is saved on disk using `torch.export.save() `__. +Below is an example of how to convert an ``ExportedProgram`` from disk: + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. code-block:: py + :force: + + import openvino as ov + ov_model = ov.convert_model('exported_program.pt2') + + .. tab-item:: CLI + :sync: cli + + .. 
code-block:: sh + + ovc exported_program.pt2 + +ScriptModule Format ++++++++++++++++++++ + +`torch.jit.save() `__ serializes ``ScriptModule`` object on disk. +To convert the serialized ``ScriptModule`` format, run ``convert_model`` function with ``example_input`` parameter as follows: + +.. code-block:: py + :force: + + from openvino import convert_model + import torch + + convert_model(input_model='script_module.pt', example_input=torch.rand(1, 10)) + +``example_input`` is the required parameter for the conversion because ``torch.jit.ScriptModule`` object is always saved in an untraced state on disk. + Exporting a PyTorch Model to ONNX Format ######################################## From b0ff7090a305f94d6ec86f7b60d1833d0dc87be5 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Mon, 23 Dec 2024 19:12:34 +0800 Subject: [PATCH 55/60] fix coverity scan issue 1568450 (#28139) ### Details: - *fix below coverity scan issue* *** CID 1568450: Concurrent data access violations (MISSING_LOCK) /openvino/src/inference/src/os/lin/lin_system_conf.cpp: 225 in ov::CPU::CPU()::[lambda() (instance 2)]::operator ()() const() 219 return -1; 220 } else if (valid_cpu_mapping_table.size() == (unsigned)_processors) { 221 return 0; 222 } else { 223 _processors = valid_cpu_mapping_table.size(); 224 _cpu_mapping_table.swap(valid_cpu_mapping_table); >>> CID 1568450: Concurrent data access violations (MISSING_LOCK) >>> Accessing "this->this->_proc_type_table" without holding lock "ov::CPU._cpu_mutex". Elsewhere, "ov::CPU._proc_type_table" is written to with "CPU._cpu_mutex" held 2 out of 3 times. 
225 update_valid_processor_linux(std::move(phy_core_list), 226 _numa_nodes, 227 _cores, 228 _proc_type_table, 229 _cpu_mapping_table); 230 return 0; ### Tickets: - *CID 1568450* --- src/inference/src/os/lin/lin_system_conf.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index f8bd16173b8fce..9b6247c6691814 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -188,6 +188,7 @@ CPU::CPU() { } else if (valid_cpu_mapping_table.size() == (unsigned)_processors) { return 0; } else { + std::lock_guard lock{_cpu_mutex}; _processors = valid_cpu_mapping_table.size(); _cpu_mapping_table.swap(valid_cpu_mapping_table); update_valid_processor_linux(std::move(phy_core_list), From 80115574aeebd79e2bec4050b702076c33deee23 Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Mon, 23 Dec 2024 15:40:14 +0000 Subject: [PATCH 56/60] [NPUW] Extend NPUW_DQ to work with NF4 for CW models (#28125) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- .../intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp index 5abe4b39fd44f2..0260fc9718c444 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp @@ -160,7 +160,8 @@ DQMatMulCWi::DQMatMulCWi(Context::Ref ctx) { auto qcoeff_shape = matched_node_qcoeff->output(0).get_shape(); if ((ov::element::i4 == matched_qweight->get_element_type() || - ov::element::i8 == matched_qweight->get_element_type()) && + ov::element::i8 == matched_qweight->get_element_type() || + ov::element::nf4 == matched_qweight->get_element_type()) && (ov::op::util::is_parameter(matched_node_qcoeff) || 
ov::op::util::is_constant(matched_node_qcoeff)) && qcoeff_shape[1] == 1 && !matched_matmul->get_transpose_a() && matched_matmul->get_transpose_b()) { auto matched_node_cvtw = node_to_output.at(qcvtw).get_node_shared_ptr(); From ae1fbbe52aa8177ae3799a49bb8066729445a6fd Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 23 Dec 2024 21:21:17 +0400 Subject: [PATCH 57/60] [GHA][JAX FE] Separate JAX layer tests to special job and have dedicated req file (#28178) **Details:** Separate JAX layer tests to special job and have dedicated req file **Ticket:** TBD --------- Signed-off-by: Kazantsev, Roman --- .github/workflows/job_jax_layer_tests.yml | 133 ++++++++++++++++++ .github/workflows/job_jax_models_tests.yml | 2 +- .github/workflows/job_python_unit_tests.yml | 8 -- .github/workflows/linux_arm64.yml | 10 ++ .github/workflows/mac.yml | 9 ++ .github/workflows/mac_arm64.yml | 9 ++ .github/workflows/ubuntu_22.yml | 10 ++ .github/workflows/ubuntu_24.yml | 10 ++ .github/workflows/windows_vs2019_release.yml | 9 ++ tests/CMakeLists.txt | 2 +- .../test_tf_UnaryOpsAllRealDomain.py | 2 +- tests/model_hub_tests/jax/requirements.txt | 10 -- tests/requirements_jax | 13 ++ tests/requirements_tensorflow | 2 +- 14 files changed, 207 insertions(+), 22 deletions(-) create mode 100644 .github/workflows/job_jax_layer_tests.yml delete mode 100644 tests/model_hub_tests/jax/requirements.txt create mode 100644 tests/requirements_jax diff --git a/.github/workflows/job_jax_layer_tests.yml b/.github/workflows/job_jax_layer_tests.yml new file mode 100644 index 00000000000000..25f171060f43be --- /dev/null +++ b/.github/workflows/job_jax_layer_tests.yml @@ -0,0 +1,133 @@ +name: JAX Layer Tests + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + container: + description: 'JSON to be converted to the value of the "container" configuration for the job' + type: string + required: false + default: '{"image": null}' + 
affected-components: + description: 'Components that are affected by changes in the commit defined by the Smart CI Action' + type: string + required: true + python-version: + description: 'Python version to setup. E.g., "3.11"' + type: string + required: true + +permissions: read-all + +env: + PIP_CACHE_PATH_LINUX: /mount/caches/pip/linux + PIP_CACHE_PATH_WIN: "C:\\mount\\caches\\pip\\win" + +jobs: + JAX_Layer_Tests: + name: JAX Layer Tests + timeout-minutes: 40 + runs-on: ${{ inputs.runner }} + container: ${{ fromJSON(inputs.container) }} + defaults: + run: + shell: ${{ contains(inputs.runner, 'win') && 'pwsh' || 'bash' }} + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + OPENVINO_REPO: ${{ github.workspace }}/openvino + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + INSTALL_WHEELS_DIR: ${{ github.workspace }}/install/wheels + LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests + steps: + - name: Download OpenVINO artifacts (tarballs) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + pattern: openvino_[tests]* + path: ${{ env.INSTALL_DIR }} + merge-multiple: true + + - name: Download OpenVINO artifacts (wheels) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + pattern: openvino_[wheels]* + path: ${{ env.INSTALL_WHEELS_DIR }} + merge-multiple: true + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + if: runner.os != 'Windows' + run: | + echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV" + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "INSTALL_WHEELS_DIR=$GITHUB_WORKSPACE/install/wheels" >> "$GITHUB_ENV" + echo "LAYER_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/layer_tests" >> 
"$GITHUB_ENV" + + - name: Install OpenVINO dependencies (mac) + if: runner.os == 'macOS' + run: brew install pigz + + - name: Extract OpenVINO packages (Linux, macOS) + if: runner.os != 'Windows' + run: | + pigz -dc openvino_tests.tar.gz | tar -xf - -C ${INSTALL_DIR} + working-directory: ${{ env.INSTALL_DIR }} + + - name: Extract OpenVINO artifacts (Windows) + if: runner.os == 'Windows' + run: | + Expand-Archive openvino_tests.zip -DestinationPath ${{ env.INSTALL_DIR }} + working-directory: ${{ env.INSTALL_DIR }} + + - name: Fetch setup_python and install wheels actions + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + timeout-minutes: 15 + with: + sparse-checkout: | + .github/actions/setup_python/action.yml + .github/actions/install_ov_wheels/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Setup Python ${{ inputs.python-version }} + uses: ./openvino/.github/actions/setup_python + with: + version: ${{ inputs.python-version }} + pip-cache-path: ${{ runner.os == 'Linux' && env.PIP_CACHE_PATH_LINUX || env.PIP_CACHE_PATH_WIN }} + should-setup-pip-paths: ${{ runner.os != 'macOS' }} + self-hosted-runner: ${{ runner.os != 'macOS' }} + + - name: Install OpenVINO Python wheels + uses: ./openvino/.github/actions/install_ov_wheels + with: + wheels-dir-path: ${{ env.INSTALL_WHEELS_DIR }} + wheels-to-install: 'openvino' + + - name: Install JAX Layer tests dependencies + run: | + # jax test requirements + python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/requirements_jax + + - name: JAX Layer Tests + if: ${{ fromJSON(inputs.affected-components).JAX_FE.test && runner.arch != 'ARM64' }} # Ticket: 126287, 142196 + run: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/jax_tests ${PARALLEL} -m precommit_jax_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-jax.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + JAX_TRACE_MODE: JAXPR + PARALLEL: ${{ runner.os == 'Windows' && ' ' || '-n logical'}} + + - name: Upload Test 
Results + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + if: ${{ !cancelled() }} + with: + name: test-results-python-jax-layers + path: | + ${{ env.INSTALL_TEST_DIR }}/TEST*.html + ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'warn' diff --git a/.github/workflows/job_jax_models_tests.yml b/.github/workflows/job_jax_models_tests.yml index 07155db1016057..57eb07a83aa423 100644 --- a/.github/workflows/job_jax_models_tests.yml +++ b/.github/workflows/job_jax_models_tests.yml @@ -89,7 +89,7 @@ jobs: - name: Install JAX tests requirements for precommit run: | - python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/jax/requirements.txt + python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/requirements_jax - name: JAX/Flax Models Tests from Hugging Face if: ${{ inputs.model_scope == 'precommit' || inputs.model_scope == 'nightly' }} diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index b04f719c8e296f..e1532d530ff2db 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -162,14 +162,6 @@ jobs: export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml - - name: JAX Layer Tests - JAX FE - if: ${{ fromJSON(inputs.affected-components).JAX_FE.test && runner.arch != 'ARM64' && runner.os != 'macOS' }} - run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit_jax_fe --junitxml=${INSTALL_TEST_DIR}/TEST-jax_fe.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - JAX_TRACE_MODE: JAXPR - - name: TensorFlow Lite Layer Tests - TFL FE if: fromJSON(inputs.affected-components).TFL_FE.test run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_lite_tests/ -n logical --junitxml=${INSTALL_TEST_DIR}/TEST-tfl_fe.xml diff --git a/.github/workflows/linux_arm64.yml 
b/.github/workflows/linux_arm64.yml index 66e825e5d5e126..ca1ca6e056e23d 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -202,6 +202,16 @@ jobs: affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' + JAX_Layer_Tests: + name: JAX Layer Tests + needs: [ Build, Docker, Smart_CI ] + uses: ./.github/workflows/job_jax_layer_tests.yml + with: + runner: 'aks-linux-16-cores-32gb-arm' + container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_20_04_arm64 }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + python-version: '3.11' + CPU_Functional_Tests: name: CPU functional tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 5e4335b8151c02..0fbc20cf19594b 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -356,6 +356,15 @@ jobs: affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' + JAX_Layer_Tests: + name: JAX Layer Tests + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_jax_layer_tests.yml + with: + runner: 'macos-13' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + python-version: '3.11' + CPU_Functional_Tests: name: CPU functional tests # if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 855d76973cc2e4..b60daefa442c83 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -355,6 +355,15 @@ jobs: affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' + JAX_Layer_Tests: + name: JAX Layer Tests + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_jax_layer_tests.yml + with: + runner: 'macos-13-xlarge' + affected-components: ${{ 
needs.smart_ci.outputs.affected_components }} + python-version: '3.11' + CPU_Functional_Tests: name: CPU functional tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test diff --git a/.github/workflows/ubuntu_22.yml b/.github/workflows/ubuntu_22.yml index 5aed74bbb242b8..e5c7d25003de1e 100644 --- a/.github/workflows/ubuntu_22.yml +++ b/.github/workflows/ubuntu_22.yml @@ -334,6 +334,16 @@ jobs: affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' + JAX_Layer_Tests: + name: JAX Layer Tests + needs: [ Docker, Build, Smart_CI ] + uses: ./.github/workflows/job_jax_layer_tests.yml + with: + runner: 'aks-linux-4-cores-16gb' + container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_22_04_x64 }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + python-version: '3.11' + CPU_Functional_Tests: name: CPU functional tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test diff --git a/.github/workflows/ubuntu_24.yml b/.github/workflows/ubuntu_24.yml index 25be095e692d35..beac15bfbda97d 100644 --- a/.github/workflows/ubuntu_24.yml +++ b/.github/workflows/ubuntu_24.yml @@ -156,6 +156,16 @@ jobs: affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.12' + JAX_Layer_Tests: + name: JAX Layer Tests + needs: [ Docker, Build, Smart_CI ] + uses: ./.github/workflows/job_jax_layer_tests.yml + with: + runner: 'aks-linux-4-cores-16gb' + container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_24_04_x64 }}", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + python-version: '3.12' + TensorFlow_Layer_Tests: name: TensorFlow Layer Tests needs: [ Docker, Build, Smart_CI, Openvino_tokenizers ] diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index f1fd0be596baa2..de33f2603d7430 
100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -499,6 +499,15 @@ jobs: affected-components: ${{ needs.smart_ci.outputs.affected_components }} python-version: '3.11' + JAX_Layer_Tests: + name: JAX Layer Tests + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_jax_layer_tests.yml + with: + runner: 'aks-win-8-cores-16gb' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + python-version: '3.11' + CXX_Unit_Tests: name: C++ unit tests needs: [ Build, Smart_CI ] diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 08b4308479ef03..de3ad80280d603 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -7,5 +7,5 @@ add_subdirectory(model_hub_tests) add_subdirectory(samples_tests) add_subdirectory(e2e_tests) -install(FILES requirements_pytorch requirements_tensorflow requirements_onnx +install(FILES requirements_pytorch requirements_tensorflow requirements_onnx requirements_jax DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py index 4ff4d589cbae32..5c1037e38cfc84 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py @@ -67,4 +67,4 @@ def test_unary_ops(self, input_shape, input_type, op_type, pytest.skip("159585: accuracy error on ARM") self._test(*self.create_unary_net(input_shape, input_type, op_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_legacy_frontend=use_legacy_frontend, custom_eps=1e-3) + use_legacy_frontend=use_legacy_frontend, custom_eps=3 * 1e-3) diff --git a/tests/model_hub_tests/jax/requirements.txt b/tests/model_hub_tests/jax/requirements.txt deleted file mode 100644 index 328084ac050ca6..00000000000000 --- a/tests/model_hub_tests/jax/requirements.txt +++ /dev/null @@ -1,10 +0,0 
@@ --c ../../constraints.txt -numpy -pytest -pytest-html -transformers -requests -jax -jaxlib -flax -pillow \ No newline at end of file diff --git a/tests/requirements_jax b/tests/requirements_jax new file mode 100644 index 00000000000000..c392df4359bee3 --- /dev/null +++ b/tests/requirements_jax @@ -0,0 +1,13 @@ +numpy==1.26.4; python_version < "3.12" or platform_system == "Darwin" and platform_machine == "x86_64" +numpy==2.2.1; python_version >= "3.12" and (platform_system != "Darwin" or platform_machine != "x86_64") +pytest==7.0.1 +pytest-xdist[psutil]==3.6.1 +pytest-html==4.1.1 +jax==0.4.38; (platform_system != "Darwin" or platform_machine != "x86_64") and python_version > "3.9" +# tensorflow 2.16.2 depends on ml-dtypes~=0.3.1 and jax 0.4.35 depends on ml-dtypes>=0.4.0 +jax==0.4.33; (platform_system == "Darwin" and platform_machine == "x86_64") and python_version > "3.9" +jax==0.4.30; python_version <= "3.9" +flax==0.10.2 +transformers==4.47.1 +defusedxml +pillow diff --git a/tests/requirements_tensorflow b/tests/requirements_tensorflow index 5369b0135f7618..8e0d1141695ef9 100644 --- a/tests/requirements_tensorflow +++ b/tests/requirements_tensorflow @@ -17,7 +17,7 @@ wrapt==1.15.0; python_version >= "3.12" # tensorflow-text is not available for both Windows and ARM platforms tensorflow-text==2.18.0; python_version < "3.12" and platform_system == "Linux" and platform_machine == "x86_64" tensorflow-hub==0.16.1 -jax==0.4.35; (platform_system != "Darwin" or platform_machine != "x86_64") and python_version > "3.9" +jax==0.4.38; (platform_system != "Darwin" or platform_machine != "x86_64") and python_version > "3.9" # tensorflow 2.16.2 depends on ml-dtypes~=0.3.1 and jax 0.4.35 depends on ml-dtypes>=0.4.0 jax==0.4.33; (platform_system == "Darwin" and platform_machine == "x86_64") and python_version > "3.9" jax==0.4.30; python_version <= "3.9" From 92edc910c54e8b322dc75558c3e7fed0738e9797 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Tue, 24 Dec 2024 10:07:24 
+0900 Subject: [PATCH 58/60] [GPU] Fix ConvolutionKernel_b_fs_yx_fsv16_1x1 to support input0 feature dynamic case (#28156) ### Details: - Fix ConvolutionKernel_b_fs_yx_fsv16_1x1 to support input0 feature dynamic case ### Tickets: - 146681 --- .../cl_kernels/convolution_gpu_bfyx_f16_1x1.cl | 5 ++--- .../convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp | 2 ++ .../tests/unit/test_cases/convolution_gpu_test.cpp | 9 ++++++++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_bfyx_f16_1x1.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_bfyx_f16_1x1.cl index 542fa69ebc241b..109fa2de9841aa 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_bfyx_f16_1x1.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_bfyx_f16_1x1.cl @@ -122,8 +122,8 @@ KERNEL(convolution_b_fs_yx_fsv16_1x1)( { #endif // SLM_DIV_FACTOR > 1 vec_t src = 0; -#if INPUT_LEFTOVERS - if ((k + 1) * FEATURE_SLICE_SIZE >= INPUT0_FEATURE_NUM) + + if (INPUT_LEFTOVERS && ((k + 1) * FEATURE_SLICE_SIZE >= INPUT0_FEATURE_NUM)) { if (k * FEATURE_SLICE_SIZE + sglid < INPUT0_FEATURE_NUM) { @@ -143,7 +143,6 @@ KERNEL(convolution_b_fs_yx_fsv16_1x1)( } } else -#endif // INPUT_LEFTOVERS { #if PADDED_INPUT #if X_BLOCK_SIZE > 1 diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp index 6fd074f8d8506d..7150d51ecf1e48 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp @@ -264,6 +264,8 @@ JitConstants ConvolutionKernel_b_fs_yx_fsv16_1x1::GetJitConstants(const convolut } if (params.inputs[0].Feature().v % 
tuning_data.feature_block_size != 0) { jit.AddConstant(MakeJitConstant("INPUT_LEFTOVERS", 1)); + } else { + jit.AddConstant(MakeJitConstant("INPUT_LEFTOVERS", 0)); } } else { DimensionAccessHelperJit input0_dims(params.inputs[0]); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index f0243f055c3670..13934020bfdf66 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -10820,7 +10820,14 @@ TEST_P(conv_dyn_test, convolution_gpu_fsv16_1x1_no_bias) { return outputs_ref.at("conv").get_memory(); }; - auto in_layout = layout{ov::PartialShape{ov::Dimension(), ov::Dimension(p.in_shape[1]), ov::Dimension(), ov::Dimension()}, data_types::f16, format::b_fs_yx_fsv16}; + cldnn::layout in_layout; + if (p.in_shape[2] % 2 == 0) { + // input feature is static + in_layout = layout{ov::PartialShape{ov::Dimension(), ov::Dimension(p.in_shape[1]), ov::Dimension(), ov::Dimension()}, data_types::f16, format::b_fs_yx_fsv16}; + } else { + // input feature is dynamic + in_layout = layout{ov::PartialShape{ov::Dimension(), ov::Dimension(), ov::Dimension(), ov::Dimension()}, data_types::f16, format::b_fs_yx_fsv16}; + } auto input = engine.allocate_memory({ p.in_shape, data_types::f16, format::b_fs_yx_fsv16 }); auto weights = engine.allocate_memory({p.wei_shape, data_types::f16, is_grouped ? 
format::bfzyx : format::bfyx}); From f62b94f0cd924ba9414b892dd270248059ff16ba Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Tue, 24 Dec 2024 11:09:29 +0800 Subject: [PATCH 59/60] support offline CPU in Linux (#28149) ### Details: - *support offline CPU in Linux* - *Ignore SOC Ecore of MTL* - *enable Ecore of LNL* - *similar PR of [PR 27870](https://github.com/openvinotoolkit/openvino/pull/27870) which is reverted* ### Tickets: - *CVS-154222, CVS-159641* - *[issues-26889](https://github.com/openvinotoolkit/openvino/issues/26889)* --- src/inference/src/os/lin/lin_system_conf.cpp | 366 ++++++++++-------- .../cpu_map_parser/cache_parser_linux.cpp | 245 ++++++++++++ .../unit/cpu_map_parser/freq_parser_linux.cpp | 183 +++++++++ 3 files changed, 642 insertions(+), 152 deletions(-) diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 9b6247c6691814..64da4cb0ac836a 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -23,76 +23,107 @@ CPU::CPU() { std::vector> system_info_table; std::vector node_info_table; - auto get_cache_info_linux = [&]() { + constexpr int cache_info_mode = 1; + constexpr int freq_info_mode = 2; + + auto get_info_linux = [&](int mode) { int cpu_index = 0; - int cache_index = 0; - int cache_files = 3; + int file_index = 0; + int max_files = 3; - std::vector one_info(cache_files); + std::string one_info; - while (1) { - for (int n = 0; n < cache_files; n++) { - cache_index = (n == 0) ? 
n : n + 1; - - std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + "/cache/index" + - std::to_string(cache_index) + "/shared_cpu_list"); - if (!cache_file.is_open()) { - cache_index = -1; - break; - } - std::string cache_info; - std::getline(cache_file, cache_info); - one_info[n] = std::move(cache_info); - } + std::string::size_type pos = 0; + std::string::size_type endpos = 0; + std::string sub_str; - if (cache_index == -1) { - if (cpu_index == 0) { - return -1; - } else { - return 0; - } - } else { - system_info_table.push_back(one_info); - cpu_index++; - } + int core_1; + int core_2; + + system_info_table.clear(); + + std::ifstream possible_file("/sys/devices/system/cpu/possible"); + std::string possible_info; + + if (possible_file.is_open()) { + std::getline(possible_file, possible_info); + } else { + return -1; } - return 0; - }; + if ((endpos = possible_info.find('-', pos)) != std::string::npos) { + sub_str = possible_info.substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = possible_info.substr(endpos + 1); + core_2 = std::stoi(sub_str); + system_info_table.resize(core_2 + 1, std::vector(max_files, "")); + } else { + return -1; + } - auto get_freq_info_linux = [&]() { - int cpu_index = 0; - int cache_index = 0; + std::ifstream online_file("/sys/devices/system/cpu/online"); + std::string online_info; - std::vector file_name = {"/topology/core_cpus_list", - "/topology/physical_package_id", - "/cpufreq/cpuinfo_max_freq"}; - int num_of_files = file_name.size(); - std::vector one_info(num_of_files); + if (online_file.is_open()) { + std::getline(online_file, online_info); + } else { + system_info_table.clear(); + return -1; + } while (1) { - for (int n = 0; n < num_of_files; n++) { - cache_index = n; + if ((endpos = online_info.find('-', pos)) != std::string::npos) { + sub_str = online_info.substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = online_info.substr(endpos + 1); + core_2 = 
std::stoi(sub_str); - std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + file_name[n]); - if (!cache_file.is_open()) { - cache_index = -1; - break; + for (cpu_index = core_1; cpu_index <= core_2; cpu_index++) { + if (mode == cache_info_mode) { + for (int n = 0; n < max_files; n++) { + file_index = (n == 0) ? n : n + 1; + one_info.clear(); + + std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + + "/cache/index" + std::to_string(file_index) + "/shared_cpu_list"); + if (cache_file.is_open()) { + std::getline(cache_file, one_info); + } else { + if ((cpu_index == core_1) && (n == 0)) { + system_info_table.clear(); + return -1; + } + } + system_info_table[cpu_index][n] = std::move(one_info); + } + } else { + std::vector file_name = {"/topology/core_cpus_list", + "/topology/physical_package_id", + "/cpufreq/cpuinfo_max_freq"}; + + for (int n = 0; n < max_files; n++) { + one_info.clear(); + + std::ifstream cache_file("/sys/devices/system/cpu/cpu" + std::to_string(cpu_index) + + file_name[n]); + if (cache_file.is_open()) { + std::getline(cache_file, one_info); + } else { + if ((cpu_index == core_1) && (n == 2)) { + system_info_table.clear(); + return -1; + } + } + system_info_table[cpu_index][n] = std::move(one_info); + } + } } - std::string cache_info; - std::getline(cache_file, cache_info); - one_info[n] = std::move(cache_info); } - if (cache_index == -1) { - if (cpu_index == 0) { - return -1; - } else { - return 0; - } + if ((pos = online_info.find(',', endpos)) != std::string::npos) { + pos++; } else { - system_info_table.push_back(one_info); - cpu_index++; + break; } } @@ -202,7 +233,7 @@ CPU::CPU() { get_node_info_linux(); - if (!get_cache_info_linux()) { + if (!get_info_linux(cache_info_mode)) { parse_cache_info_linux(system_info_table, node_info_table, _processors, @@ -216,7 +247,7 @@ CPU::CPU() { if ((_proc_type_table.size() == 0) || ((_proc_type_table[0][MAIN_CORE_PROC] == 0) && 
(_proc_type_table[0][ALL_PROC] > 0) && (_proc_type_table[0][ALL_PROC] != _proc_type_table[0][EFFICIENT_CORE_PROC]))) { - if (!get_freq_info_linux()) { + if (!get_info_linux(freq_info_mode)) { parse_freq_info_linux(system_info_table, node_info_table, _processors, @@ -472,56 +503,73 @@ void parse_cache_info_linux(const std::vector> system_i const std::vector line_value_0({0, 0, 0, 0, -1, -1}); - for (int n = 0; n < _processors; n++) { - if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { - std::string::size_type pos = 0; - std::string::size_type endpos = 0; - std::string sub_str; - - int core_1; - int core_2; + std::vector offline_list; + int info_index = 0; - if (0 == _sockets) { - _proc_type_table.push_back(line_value_0); - } else { - _proc_type_table.push_back(_proc_type_table[0]); - _proc_type_table[0] = line_value_0; - } - - while (1) { - if ((endpos = system_info_table[n][2].find('-', pos)) != std::string::npos) { - sub_str = system_info_table[n][2].substr(pos, endpos - pos); - core_1 = std::stoi(sub_str); - sub_str = system_info_table[n][2].substr(endpos + 1); - core_2 = std::stoi(sub_str); + for (int n = 0; n < _processors; n++) { + if ((system_info_table[n][2].size() > 0) || (system_info_table[n][1].size() > 0)) { + info_index = system_info_table[n][2].size() > 0 ? 
2 : 1; + if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { + std::string::size_type pos = 0; + std::string::size_type endpos = 0; + std::string sub_str; + + int core_1; + int core_2; + + if (0 == _sockets) { + _proc_type_table.push_back(line_value_0); + } else { + _proc_type_table.push_back(_proc_type_table[0]); + _proc_type_table[0] = line_value_0; + } - for (int m = core_1; m <= core_2; m++) { - _cpu_mapping_table[m][CPU_MAP_SOCKET_ID] = _sockets; - _cpu_mapping_table[m][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[m][CPU_MAP_SOCKET_ID]; - update_proc_map_info(m); + while (1) { + if ((endpos = system_info_table[n][info_index].find('-', pos)) != std::string::npos) { + sub_str = system_info_table[n][info_index].substr(pos, endpos - pos); + core_1 = std::stoi(sub_str); + sub_str = system_info_table[n][info_index].substr(endpos + 1); + core_2 = std::stoi(sub_str); + + if ((info_index == 1) && (core_2 - core_1 == 1)) { + offline_list.push_back(n); + break; + } + for (int m = core_1; m <= core_2; m++) { + _cpu_mapping_table[m][CPU_MAP_SOCKET_ID] = _sockets; + _cpu_mapping_table[m][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[m][CPU_MAP_SOCKET_ID]; + update_proc_map_info(m); + if (_processors == 0) { + return; + }; + } + } else if (pos != std::string::npos) { + sub_str = system_info_table[n][info_index].substr(pos); + core_1 = std::stoi(sub_str); + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = _sockets; + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + update_proc_map_info(core_1); if (_processors == 0) { return; }; + endpos = pos; } - } else if (pos != std::string::npos) { - sub_str = system_info_table[n][2].substr(pos); - core_1 = std::stoi(sub_str); - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = _sockets; - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - update_proc_map_info(core_1); - if (_processors == 0) { - return; - }; - endpos = pos; - } - if ((pos = 
system_info_table[n][2].find(',', endpos)) != std::string::npos) { - pos++; - } else { - break; + if ((pos = system_info_table[n][2].find(',', endpos)) != std::string::npos) { + pos++; + } else { + break; + } + } + _sockets++; + if (_proc_type_table[0][ALL_PROC] == 0) { + _proc_type_table.erase(_proc_type_table.begin()); + _sockets--; } } - _sockets++; + } else { + offline_list.push_back(n); } } @@ -541,6 +589,11 @@ void parse_cache_info_linux(const std::vector> system_i _numa_nodes = node_info_table.size(); parse_node_info_linux(node_info_table, _numa_nodes, _sockets, _proc_type_table, _cpu_mapping_table); } + + for (size_t n = 0; n < offline_list.size(); n++) { + _cpu_mapping_table.erase(_cpu_mapping_table.begin() + offline_list[n] - n); + _processors--; + } }; void get_cpu_mapping_from_cores(const int _processors, @@ -616,7 +669,6 @@ void parse_freq_info_linux(const std::vector> system_in std::vector>& _cpu_mapping_table) { int freq_max = 0; bool ecore_enabled = false; - bool ht_enabled = false; _processors = system_info_table.size(); _numa_nodes = 0; @@ -626,6 +678,8 @@ void parse_freq_info_linux(const std::vector> system_in std::vector line_value_0(PROC_TYPE_TABLE_SIZE, 0); + std::vector offline_list; + auto clean_up_output = [&]() { _processors = 0; _cores = 0; @@ -637,65 +691,68 @@ void parse_freq_info_linux(const std::vector> system_in }; for (int n = 0; n < _processors; n++) { - if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { - std::string::size_type pos = 0; - std::string::size_type endpos1 = 0; - std::string::size_type endpos2 = 0; - std::string sub_str; - - int core_1 = 0; - int core_2 = 0; - - if (((endpos1 = system_info_table[n][0].find(',', pos)) != std::string::npos) || - ((endpos2 = system_info_table[n][0].find('-', pos)) != std::string::npos)) { - endpos1 = (endpos1 != std::string::npos) ? 
endpos1 : endpos2; - sub_str = system_info_table[n][0].substr(pos, endpos1 - pos); - core_1 = std::stoi(sub_str); - sub_str = system_info_table[n][0].substr(endpos1 + 1); - core_2 = std::stoi(sub_str); - if ((core_1 != n) && (core_2 != n)) { - clean_up_output(); - return; - } - - _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = HYPER_THREADING_PROC; - _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + if (system_info_table[n][2].size() > 0) { + if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { + std::string::size_type pos = 0; + std::string::size_type endpos1 = 0; + std::string::size_type endpos2 = 0; + std::string sub_str; + + int core_1 = 0; + int core_2 = 0; + + if (((endpos1 = system_info_table[n][0].find(',', pos)) != std::string::npos) || + ((endpos2 = system_info_table[n][0].find('-', pos)) != std::string::npos)) { + endpos1 = (endpos1 != std::string::npos) ? 
endpos1 : endpos2; + sub_str = system_info_table[n][0].substr(pos, endpos1 - pos); + core_1 = std::stoi(sub_str); + sub_str = system_info_table[n][0].substr(endpos1 + 1); + core_2 = std::stoi(sub_str); + if ((core_1 != n) && (core_2 != n)) { + clean_up_output(); + return; + } - _cpu_mapping_table[core_2][CPU_MAP_PROCESSOR_ID] = core_2; - _cpu_mapping_table[core_2][CPU_MAP_SOCKET_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_2][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_2][CPU_MAP_CORE_ID] = _cpu_mapping_table[core_1][CPU_MAP_CORE_ID]; - _cpu_mapping_table[core_2][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; - _cpu_mapping_table[core_2][CPU_MAP_GROUP_ID] = _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID]; + _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = HYPER_THREADING_PROC; + _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + + _cpu_mapping_table[core_2][CPU_MAP_PROCESSOR_ID] = core_2; + _cpu_mapping_table[core_2][CPU_MAP_SOCKET_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_2][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_2][CPU_MAP_CORE_ID] = _cpu_mapping_table[core_1][CPU_MAP_CORE_ID]; + _cpu_mapping_table[core_2][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; + _cpu_mapping_table[core_2][CPU_MAP_GROUP_ID] = _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID]; + + int core_freq = std::stoi(system_info_table[core_1][2]); + freq_max = std::max(core_freq, freq_max); + } else if (system_info_table[n][0].size() > 0) { + core_1 = std::stoi(system_info_table[n][0]); - ht_enabled = true; - int core_freq = 
std::stoi(system_info_table[core_1][2]); - freq_max = std::max(core_freq, freq_max); - } else if (system_info_table[n][0].size() > 0) { - core_1 = std::stoi(system_info_table[n][0]); + _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; + _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); + _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; + _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; - _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; - _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); - _cpu_mapping_table[core_1][CPU_MAP_NUMA_NODE_ID] = _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]; - _cpu_mapping_table[core_1][CPU_MAP_CORE_ID] = _cores; + int core_freq = std::stoi(system_info_table[core_1][2]); + if ((0 == freq_max) || (core_freq >= freq_max * 0.97)) { + freq_max = std::max(core_freq, freq_max); + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; + } else { + _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = EFFICIENT_CORE_PROC; + ecore_enabled = true; + } - int core_freq = std::stoi(system_info_table[core_1][2]); - if (((0 == freq_max) || (core_freq >= freq_max * 0.95)) && (!ht_enabled)) { - freq_max = std::max(core_freq, freq_max); - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = MAIN_CORE_PROC; - } else { - _cpu_mapping_table[core_1][CPU_MAP_CORE_TYPE] = EFFICIENT_CORE_PROC; - ecore_enabled = true; + _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; } - - _cpu_mapping_table[core_1][CPU_MAP_GROUP_ID] = _cores; + _sockets = std::max(_sockets, _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]); + _cores++; } - _sockets = std::max(_sockets, _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID]); - _cores++; + } else { + offline_list.push_back(n); } } @@ -734,6 +791,11 @@ void parse_freq_info_linux(const std::vector> system_in _numa_nodes = node_info_table.size(); parse_node_info_linux(node_info_table, 
_numa_nodes, _sockets, _proc_type_table, _cpu_mapping_table); } + + for (size_t n = 0; n < offline_list.size(); n++) { + _cpu_mapping_table.erase(_cpu_mapping_table.begin() + offline_list[n] - n); + _processors--; + } }; void update_valid_processor_linux(const std::vector phy_core_list, diff --git a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp index 8679090b9ae491..9ea43bd0604296 100644 --- a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp @@ -385,6 +385,188 @@ LinuxCpuMapTestCase cache_1sockets_96cores = { {"0-95"}, }, }; +LinuxCpuMapTestCase cache_2sockets_56cores_hyperthreading = { + 110, + 2, + 2, + 56, + {{110, 56, 0, 54, -1, -1}, {54, 28, 0, 26, 0, 0}, {56, 28, 0, 28, 1, 1}}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {11, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {12, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {13, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {14, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {15, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {16, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {17, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {18, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {19, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {21, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {22, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {23, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {24, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {25, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {26, 0, 0, 24, HYPER_THREADING_PROC, 
24, -1}, {27, 0, 0, 25, HYPER_THREADING_PROC, 25, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 1, 1, 48, HYPER_THREADING_PROC, 48, -1}, {49, 1, 1, 49, HYPER_THREADING_PROC, 49, -1}, + {50, 1, 1, 50, HYPER_THREADING_PROC, 50, -1}, {51, 1, 1, 51, HYPER_THREADING_PROC, 51, -1}, + {52, 1, 1, 52, HYPER_THREADING_PROC, 52, -1}, {53, 1, 1, 53, HYPER_THREADING_PROC, 53, -1}, + {54, 1, 1, 54, HYPER_THREADING_PROC, 54, -1}, {55, 1, 1, 55, HYPER_THREADING_PROC, 55, -1}, + {56, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {57, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {58, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {59, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {60, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {61, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {62, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {63, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {64, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {65, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, + {66, 0, 0, 26, MAIN_CORE_PROC, 26, -1}, {67, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, + {68, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, {69, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, + {70, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, {71, 0, 0, 14, MAIN_CORE_PROC, 
14, -1}, + {72, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, {73, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, + {74, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, {75, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, + {76, 0, 0, 27, MAIN_CORE_PROC, 27, -1}, {77, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, + {78, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {79, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {80, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {81, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {82, 0, 0, 24, MAIN_CORE_PROC, 24, -1}, {83, 0, 0, 25, MAIN_CORE_PROC, 25, -1}, + {84, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {85, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {86, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {87, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {88, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {89, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {90, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {91, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {92, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {93, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, + {94, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {95, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {96, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {97, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {98, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {99, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {100, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {101, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {102, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {103, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + {104, 1, 1, 48, MAIN_CORE_PROC, 48, -1}, {105, 1, 1, 49, MAIN_CORE_PROC, 49, -1}, + {106, 1, 1, 50, MAIN_CORE_PROC, 50, -1}, {107, 1, 1, 51, MAIN_CORE_PROC, 51, -1}, + {108, 1, 1, 52, MAIN_CORE_PROC, 52, -1}, {109, 1, 1, 53, MAIN_CORE_PROC, 53, -1}, + {110, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {111, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + }, + { + {"0,56", "0,56", "0-9,11-19,21-27,56-83"}, + {"1,57", "1,57", "0-9,11-19,21-27,56-83"}, + {"2,58", "2,58", "0-9,11-19,21-27,56-83"}, + {"3,59", "3,59", "0-9,11-19,21-27,56-83"}, + {"4,60", "4,60", "0-9,11-19,21-27,56-83"}, + {"5,61", "5,61", "0-9,11-19,21-27,56-83"}, + {"6,62", "6,62", "0-9,11-19,21-27,56-83"}, + {"7,63", 
"7,63", "0-9,11-19,21-27,56-83"}, + {"8,64", "8,64", "0-9,11-19,21-27,56-83"}, + {"9,65", "9,65", "0-9,11-19,21-27,56-83"}, + {"", "", ""}, + {"11,67", "11,67", "0-9,11-19,21-27,56-83"}, + {"12,68", "12,68", "0-9,11-19,21-27,56-83"}, + {"13,69", "13,69", "0-9,11-19,21-27,56-83"}, + {"14,70", "14,70", "0-9,11-19,21-27,56-83"}, + {"15,71", "15,71", "0-9,11-19,21-27,56-83"}, + {"16,72", "16,72", "0-9,11-19,21-27,56-83"}, + {"17,73", "17,73", "0-9,11-19,21-27,56-83"}, + {"18,74", "18,74", "0-9,11-19,21-27,56-83"}, + {"19,75", "19,75", "0-9,11-19,21-27,56-83"}, + {"", "", ""}, + {"21,77", "21,77", "0-9,11-19,21-27,56-83"}, + {"22,78", "22,78", "0-9,11-19,21-27,56-83"}, + {"23,79", "23,79", "0-9,11-19,21-27,56-83"}, + {"24,80", "24,80", "0-9,11-19,21-27,56-83"}, + {"25,81", "25,81", "0-9,11-19,21-27,56-83"}, + {"26,82", "26,82", "0-9,11-19,21-27,56-83"}, + {"27,83", "27,83", "0-9,11-19,21-27,56-83"}, + {"28,84", "28,84", "28-55,84-111"}, + {"29,85", "29,85", "28-55,84-111"}, + {"30,86", "30,86", "28-55,84-111"}, + {"31,87", "31,87", "28-55,84-111"}, + {"32,88", "32,88", "28-55,84-111"}, + {"33,89", "33,89", "28-55,84-111"}, + {"34,90", "34,90", "28-55,84-111"}, + {"35,91", "35,91", "28-55,84-111"}, + {"36,92", "36,92", "28-55,84-111"}, + {"37,93", "37,93", "28-55,84-111"}, + {"38,94", "38,94", "28-55,84-111"}, + {"39,95", "39,95", "28-55,84-111"}, + {"40,96", "40,96", "28-55,84-111"}, + {"41,97", "41,97", "28-55,84-111"}, + {"42,98", "42,98", "28-55,84-111"}, + {"43,99", "43,99", "28-55,84-111"}, + {"44,100", "44,100", "28-55,84-111"}, + {"45,101", "45,101", "28-55,84-111"}, + {"46,102", "46,102", "28-55,84-111"}, + {"47,103", "47,103", "28-55,84-111"}, + {"48,104", "48,104", "28-55,84-111"}, + {"49,105", "49,105", "28-55,84-111"}, + {"50,106", "50,106", "28-55,84-111"}, + {"51,107", "51,107", "28-55,84-111"}, + {"52,108", "52,108", "28-55,84-111"}, + {"53,109", "53,109", "28-55,84-111"}, + {"54,110", "54,110", "28-55,84-111"}, + {"55,111", "55,111", "28-55,84-111"}, + 
{"0,56", "0,56", "0-9,11-19,21-27,56-83"}, + {"1,57", "1,57", "0-9,11-19,21-27,56-83"}, + {"2,58", "2,58", "0-9,11-19,21-27,56-83"}, + {"3,59", "3,59", "0-9,11-19,21-27,56-83"}, + {"4,60", "4,60", "0-9,11-19,21-27,56-83"}, + {"5,61", "5,61", "0-9,11-19,21-27,56-83"}, + {"6,62", "6,62", "0-9,11-19,21-27,56-83"}, + {"7,63", "7,63", "0-9,11-19,21-27,56-83"}, + {"8,64", "8,64", "0-9,11-19,21-27,56-83"}, + {"9,65", "9,65", "0-9,11-19,21-27,56-83"}, + {"66", "66", "0-9,11-19,21-27,56-83"}, + {"11,67", "11,67", "0-9,11-19,21-27,56-83"}, + {"12,68", "12,68", "0-9,11-19,21-27,56-83"}, + {"13,69", "13,69", "0-9,11-19,21-27,56-83"}, + {"14,70", "14,70", "0-9,11-19,21-27,56-83"}, + {"15,71", "15,71", "0-9,11-19,21-27,56-83"}, + {"16,72", "16,72", "0-9,11-19,21-27,56-83"}, + {"17,73", "17,73", "0-9,11-19,21-27,56-83"}, + {"18,74", "18,74", "0-9,11-19,21-27,56-83"}, + {"19,75", "19,75", "0-9,11-19,21-27,56-83"}, + {"76", "76", "0-9,11-19,21-27,56-83"}, + {"21,77", "21,77", "0-9,11-19,21-27,56-83"}, + {"22,78", "22,78", "0-9,11-19,21-27,56-83"}, + {"23,79", "23,79", "0-9,11-19,21-27,56-83"}, + {"24,80", "24,80", "0-9,11-19,21-27,56-83"}, + {"25,81", "25,81", "0-9,11-19,21-27,56-83"}, + {"26,82", "26,82", "0-9,11-19,21-27,56-83"}, + {"27,83", "27,83", "0-9,11-19,21-27,56-83"}, + {"28,84", "28,84", "28-55,84-111"}, + {"29,85", "29,85", "28-55,84-111"}, + {"30,86", "30,86", "28-55,84-111"}, + {"31,87", "31,87", "28-55,84-111"}, + {"32,88", "32,88", "28-55,84-111"}, + {"33,89", "33,89", "28-55,84-111"}, + {"34,90", "34,90", "28-55,84-111"}, + {"35,91", "35,91", "28-55,84-111"}, + {"36,92", "36,92", "28-55,84-111"}, + {"37,93", "37,93", "28-55,84-111"}, + {"38,94", "38,94", "28-55,84-111"}, + {"39,95", "39,95", "28-55,84-111"}, + {"40,96", "40,96", "28-55,84-111"}, + {"41,97", "41,97", "28-55,84-111"}, + {"42,98", "42,98", "28-55,84-111"}, + {"43,99", "43,99", "28-55,84-111"}, + {"44,100", "44,100", "28-55,84-111"}, + {"45,101", "45,101", "28-55,84-111"}, + {"46,102", "46,102", 
"28-55,84-111"}, + {"47,103", "47,103", "28-55,84-111"}, + {"48,104", "48,104", "28-55,84-111"}, + {"49,105", "49,105", "28-55,84-111"}, + {"50,106", "50,106", "28-55,84-111"}, + {"51,107", "51,107", "28-55,84-111"}, + {"52,108", "52,108", "28-55,84-111"}, + {"53,109", "53,109", "28-55,84-111"}, + {"54,110", "54,110", "28-55,84-111"}, + {"55,111", "55,111", "28-55,84-111"}, + }, + { + {"0-9,11-19,21-27,56-83"}, + {"28-55,84-111"}, + }, +}; LinuxCpuMapTestCase cache_2sockets_48cores_hyperthreading = { 96, 2, @@ -1005,6 +1187,36 @@ LinuxCpuMapTestCase cache_2sockets_20cores_hyperthreading_1 = { }, {}, }; +LinuxCpuMapTestCase cache_1sockets_16cores_hyperthreading = { + 20, + 1, + 1, + 14, + {{20, 6, 8, 6, 0, 0}}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {3, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {5, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 6, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 6, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 6, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 7, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 7, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 7, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 7, -1}, + }, + { + {"0,5", "0,5", "0-19"}, {"1-2", "1-2", "0-19"}, {"1-2", "1-2", "0-19"}, {"3-4", "3-4", "0-19"}, + {"3-4", "3-4", "0-19"}, {"0,5", "0,5", "0-19"}, {"6-7", "6-7", "0-19"}, {"6-7", "6-7", "0-19"}, + {"8-9", "8-9", "0-19"}, {"8-9", "8-9", "0-19"}, {"10-11", "10-11", "0-19"}, {"10-11", "10-11", "0-19"}, + {"12", "12-15", "0-19"}, {"13", "12-15", "0-19"}, {"14", "12-15", "0-19"}, {"15", "12-15", "0-19"}, + {"16", 
"16-19", "0-19"}, {"17", "16-19", "0-19"}, {"18", "16-19", "0-19"}, {"19", "16-19", "0-19"}, + {"20", "20-21", ""}, {"21", "20-21", ""}, + }, + { + {"0-21"}, + }, +}; LinuxCpuMapTestCase cache_1sockets_14cores_hyperthreading = { 20, 1, @@ -1135,6 +1347,36 @@ LinuxCpuMapTestCase cache_1sockets_8cores_hyperthreading = { }, {{"0-11"}}, }; +LinuxCpuMapTestCase cache_1sockets_8cores_hyperthreading_1 = { + 8, + 1, + 1, + 8, + {{8, 4, 4, 0, 0, 0}}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, + {5, 0, 0, 5, EFFICIENT_CORE_PROC, 4, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 4, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 4, -1}, + }, + { + {"0", "0", "0-3"}, + {"1", "1", "0-3"}, + {"2", "2", "0-3"}, + {"3", "3", "0-3"}, + {"4", "4-7", ""}, + {"5", "4-7", ""}, + {"6", "4-7", ""}, + {"7", "4-7", ""}, + }, + { + {"0-7"}, + }, +}; LinuxCpuMapTestCase cache_1sockets_6cores_hyperthreading = { 12, 1, @@ -1220,6 +1462,7 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapCacheParserTests, testing::Values(cache_2sockets_104cores_hyperthreading, cache_1sockets_96cores, + cache_2sockets_56cores_hyperthreading, cache_2sockets_48cores_hyperthreading, cache_2sockets_48cores_hyperthreading_1, cache_2sockets_24cores_hyperthreading, @@ -1229,10 +1472,12 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, cache_2sockets_48cores_2, cache_2sockets_20cores_hyperthreading, cache_2sockets_20cores_hyperthreading_1, + cache_1sockets_16cores_hyperthreading, cache_1sockets_14cores_hyperthreading, cache_1sockets_14cores_hyperthreading_1, cache_1sockets_10cores_hyperthreading, cache_1sockets_8cores_hyperthreading, + cache_1sockets_8cores_hyperthreading_1, cache_1sockets_6cores_hyperthreading, cache_1sockets_4cores, cache_VM_cache_0)); diff --git a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp 
index 04ab617961b953..8ccdfad011d19c 100644 --- a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp @@ -258,6 +258,188 @@ LinuxCpuMapTestCase freq_2sockets_112cores_hyperthreading = { }, // param[in]: The CPU frequency information table of this simulated platform {{"0-55,112-167"}, {"56-111,168-223"}}, // param[in]: The numa node information table of this simulated platform }; +LinuxCpuMapTestCase freq_2sockets_56cores_hyperthreading = { + 110, + 2, + 2, + 56, + {{110, 56, 0, 54, -1, -1}, {54, 28, 0, 26, 0, 0}, {56, 28, 0, 28, 1, 1}}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {11, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {12, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {13, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {14, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {15, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {16, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {17, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {18, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {19, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {21, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {22, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {23, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {24, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {25, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {26, 0, 0, 24, HYPER_THREADING_PROC, 24, -1}, {27, 0, 0, 25, HYPER_THREADING_PROC, 25, -1}, + {28, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {29, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {30, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {31, 1, 1, 29, HYPER_THREADING_PROC, 29, 
-1}, + {32, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {33, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {34, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {35, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {36, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {37, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {38, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {39, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {40, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {41, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {42, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {43, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {44, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {45, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {46, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {47, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {48, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {49, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {50, 1, 1, 48, HYPER_THREADING_PROC, 48, -1}, {51, 1, 1, 49, HYPER_THREADING_PROC, 49, -1}, + {52, 1, 1, 50, HYPER_THREADING_PROC, 50, -1}, {53, 1, 1, 51, HYPER_THREADING_PROC, 51, -1}, + {54, 1, 1, 52, HYPER_THREADING_PROC, 52, -1}, {55, 1, 1, 53, HYPER_THREADING_PROC, 53, -1}, + {56, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {57, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {58, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {59, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {60, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {61, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {62, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {63, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {64, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {65, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, + {66, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {67, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, + {68, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, {69, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, + {70, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, {71, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, + {72, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, {73, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, + {74, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, {75, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, + {76, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, {77, 0, 0, 19, 
MAIN_CORE_PROC, 19, -1}, + {78, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {79, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {80, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {81, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {82, 0, 0, 24, MAIN_CORE_PROC, 24, -1}, {83, 0, 0, 25, MAIN_CORE_PROC, 25, -1}, + {84, 1, 1, 26, MAIN_CORE_PROC, 26, -1}, {85, 1, 1, 27, MAIN_CORE_PROC, 27, -1}, + {86, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {87, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {88, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {89, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {90, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {91, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {92, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {93, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {94, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {95, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, + {96, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {97, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {98, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {99, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {100, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {101, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {102, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {103, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {104, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {105, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + {106, 1, 1, 48, MAIN_CORE_PROC, 48, -1}, {107, 1, 1, 49, MAIN_CORE_PROC, 49, -1}, + {108, 1, 1, 50, MAIN_CORE_PROC, 50, -1}, {109, 1, 1, 51, MAIN_CORE_PROC, 51, -1}, + {110, 1, 1, 52, MAIN_CORE_PROC, 52, -1}, {111, 1, 1, 53, MAIN_CORE_PROC, 53, -1}, + }, + { + {"0,56", "0", "3500000"}, + {"1,57", "0", "3500000"}, + {"2,58", "0", "3500000"}, + {"3,59", "0", "3500000"}, + {"4,60", "0", "3500000"}, + {"5,61", "0", "3500000"}, + {"6,62", "0", "3500000"}, + {"7,63", "0", "3500000"}, + {"8,64", "0", "3500000"}, + {"9,65", "0", "3500000"}, + {"", "", ""}, + {"11,67", "0", "3500000"}, + {"12,68", "0", "3500000"}, + {"13,69", "0", "3500000"}, + {"14,70", "0", "3500000"}, + {"15,71", "0", "3500000"}, + {"16,72", "0", "3500000"}, + {"17,73", "0", "3500000"}, + {"18,74", "0", "3500000"}, + {"19,75", "0", "3500000"}, + 
{"", "", ""}, + {"21,77", "0", "3500000"}, + {"22,78", "0", "3500000"}, + {"23,79", "0", "3500000"}, + {"24,80", "0", "3500000"}, + {"25,81", "0", "3500000"}, + {"26,82", "0", "3500000"}, + {"27,83", "0", "3500000"}, + {"28,84", "1", "3500000"}, + {"29,85", "1", "3500000"}, + {"30,86", "1", "3500000"}, + {"31,87", "1", "3500000"}, + {"32,88", "1", "3500000"}, + {"33,89", "1", "3500000"}, + {"34,90", "1", "3500000"}, + {"35,91", "1", "3500000"}, + {"36,92", "1", "3500000"}, + {"37,93", "1", "3500000"}, + {"38,94", "1", "3500000"}, + {"39,95", "1", "3500000"}, + {"40,96", "1", "3500000"}, + {"41,97", "1", "3500000"}, + {"42,98", "1", "3500000"}, + {"43,99", "1", "3500000"}, + {"44,100", "1", "3500000"}, + {"45,101", "1", "3500000"}, + {"46,102", "1", "3500000"}, + {"47,103", "1", "3500000"}, + {"48,104", "1", "3500000"}, + {"49,105", "1", "3500000"}, + {"50,106", "1", "3500000"}, + {"51,107", "1", "3500000"}, + {"52,108", "1", "3500000"}, + {"53,109", "1", "3500000"}, + {"54,110", "1", "3500000"}, + {"55,111", "1", "3500000"}, + {"0,56", "0", "3500000"}, + {"1,57", "0", "3500000"}, + {"2,58", "0", "3500000"}, + {"3,59", "0", "3500000"}, + {"4,60", "0", "3500000"}, + {"5,61", "0", "3500000"}, + {"6,62", "0", "3500000"}, + {"7,63", "0", "3500000"}, + {"8,64", "0", "3500000"}, + {"9,65", "0", "3500000"}, + {"66", "0", "3500000"}, + {"11,67", "0", "3500000"}, + {"12,68", "0", "3500000"}, + {"13,69", "0", "3500000"}, + {"14,70", "0", "3500000"}, + {"15,71", "0", "3500000"}, + {"16,72", "0", "3500000"}, + {"17,73", "0", "3500000"}, + {"18,74", "0", "3500000"}, + {"19,75", "0", "3500000"}, + {"76", "0", "3500000"}, + {"21,77", "0", "3500000"}, + {"22,78", "0", "3500000"}, + {"23,79", "0", "3500000"}, + {"24,80", "0", "3500000"}, + {"25,81", "0", "3500000"}, + {"26,82", "0", "3500000"}, + {"27,83", "0", "3500000"}, + {"28,84", "1", "3500000"}, + {"29,85", "1", "3500000"}, + {"30,86", "1", "3500000"}, + {"31,87", "1", "3500000"}, + {"32,88", "1", "3500000"}, + {"33,89", "1", 
"3500000"}, + {"34,90", "1", "3500000"}, + {"35,91", "1", "3500000"}, + {"36,92", "1", "3500000"}, + {"37,93", "1", "3500000"}, + {"38,94", "1", "3500000"}, + {"39,95", "1", "3500000"}, + {"40,96", "1", "3500000"}, + {"41,97", "1", "3500000"}, + {"42,98", "1", "3500000"}, + {"43,99", "1", "3500000"}, + {"44,100", "1", "3500000"}, + {"45,101", "1", "3500000"}, + {"46,102", "1", "3500000"}, + {"47,103", "1", "3500000"}, + {"48,104", "1", "3500000"}, + {"49,105", "1", "3500000"}, + {"50,106", "1", "3500000"}, + {"51,107", "1", "3500000"}, + {"52,108", "1", "3500000"}, + {"53,109", "1", "3500000"}, + {"54,110", "1", "3500000"}, + {"55,111", "1", "3500000"}, + }, + { + {"0-9,11-19,21-27,56-83"}, + {"28-55,84-111"}, + }, +}; LinuxCpuMapTestCase freq_2sockets_48cores_hyperthreading = { 96, 2, @@ -987,6 +1169,7 @@ TEST_P(LinuxCpuMapFreqParserTests, LinuxFreq) {} INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapFreqParserTests, testing::Values(freq_2sockets_112cores_hyperthreading, + freq_2sockets_56cores_hyperthreading, freq_2sockets_48cores_hyperthreading, freq_2sockets_48cores_hyperthreading_1, freq_2sockets_24cores_hyperthreading, From b4c81e0f39e9cd30752879e4db32487dadba7db0 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Tue, 24 Dec 2024 09:02:27 +0400 Subject: [PATCH 60/60] [TRANSFORMATIONS][GPU] SDPA Fusion passes (#28042) ### Details: - Added basic SDPA fusion pass and QK scaling fusion into SDPA T5 case --------- Signed-off-by: Vladimir Paramuzov --- .../common_optimizations/sdpa_fusion.hpp | 60 +++++ .../sdpa_scale_fusion.hpp | 58 +++++ .../moc_transformations.cpp | 2 + .../common_optimizations/sdpa_fusion.cpp | 127 ++++++++++ .../sdpa_scale_fusion.cpp | 140 +++++++++++ .../common_optimizations/sdpa_fusion_test.cpp | 234 ++++++++++++++++++ .../sdpa_scale_fusion_test.cpp | 228 +++++++++++++++++ .../transformation_pipeline.cpp | 2 + .../src/plugin/transformations_pipeline.cpp | 2 + 9 files changed, 853 insertions(+) create mode 100644 
src/common/transformations/include/transformations/common_optimizations/sdpa_fusion.hpp create mode 100644 src/common/transformations/include/transformations/common_optimizations/sdpa_scale_fusion.hpp create mode 100644 src/common/transformations/src/transformations/common_optimizations/sdpa_fusion.cpp create mode 100644 src/common/transformations/src/transformations/common_optimizations/sdpa_scale_fusion.cpp create mode 100644 src/common/transformations/tests/common_optimizations/sdpa_fusion_test.cpp create mode 100644 src/common/transformations/tests/common_optimizations/sdpa_scale_fusion_test.cpp diff --git a/src/common/transformations/include/transformations/common_optimizations/sdpa_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/sdpa_fusion.hpp new file mode 100644 index 00000000000000..84383b777604ea --- /dev/null +++ b/src/common/transformations/include/transformations/common_optimizations/sdpa_fusion.hpp @@ -0,0 +1,60 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/matcher_pass.hpp" +#include "transformations_visibility.hpp" + +namespace ov { +namespace pass { + +/// This pass transforms the following sub-graph to a single Scaled Dot Product Attention operation. 
+/// Before: +/// ┌───────┐ ┌───────┐ ┌───────┐ +/// │ Q │ │ K │ │ V │ +/// └───┬───┘ └───┬───┘ └───┬───┘ +/// │ │ │ +/// │ │ │ +/// ┌───┴───┐ ┌─────┴──────┐ │ +/// │ MatMul│<──│ Transpose │ │ +/// └───┬───┘ | (Optional) │ │ +/// │ └────────────┘ │ +/// ┌───┴───┐ ┌─────────────┐ │ +/// │ Add │<───│AttentionMask│ │ +/// └───┬───┘ | (Optional) │ │ +/// │ └─────────────┘ │ +/// ┌───┴───┐ │ +/// │Softmax│ │ +/// └───┬───┘ │ +/// │ │ +/// ┌───┴───┐ │ +/// │ MatMul│<─────────────────────┘ +/// └───┬───┘ +/// ┌───┴───┐ +/// │ Output│ +/// └───────┘ +/// +/// After: +/// ┌───────┐ ┌───────┐ ┌───────┐ ┌─────────────┐ +/// │ Q │ │ K │ │ V │ │AttentionMask│ +/// └───┬───┘ └───┬───┘ └───┬───┘ └──────┬──────┘ +/// │ │ │ │ +/// │ │ │ │ +/// ┌───┴────────────┴────────────┴───────────────┴─┐ +/// │ ScaledDotProductAttention │ +/// └────────────────────┬──────────────────────────┘ +/// │ +/// │ +/// ┌────┴────┐ +/// │ Output │ +/// └─────────┘ +class TRANSFORMATIONS_API SDPAFusion : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("SDPAFusion", "0"); + SDPAFusion(); +}; + +} // namespace pass +} // namespace ov diff --git a/src/common/transformations/include/transformations/common_optimizations/sdpa_scale_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/sdpa_scale_fusion.hpp new file mode 100644 index 00000000000000..cae0363e785f4e --- /dev/null +++ b/src/common/transformations/include/transformations/common_optimizations/sdpa_scale_fusion.hpp @@ -0,0 +1,58 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/matcher_pass.hpp" +#include "transformations_visibility.hpp" + +namespace ov { +namespace pass { + +/// Merges explicit multiplication by scalar value for Q and K into scale attribute of SDPA op +/// Before: +/// ┌───────┐ ┌───────┐ ┌───────┐ ┌─────────────┐ ┌─────────────┐ +/// │ Q │ │ K │ │ V │ │AttentionMask│ │ Scale | +/// └───┬───┘ 
└───┬───┘ └───┬───┘ │ (Optional) │ │ (Optional) │ +/// │ │ │ └──────┬──────┘ └───────┬─────┘ +/// │ │ │ │ | +/// ┌───┴───┐ ┌───┴───┐ │ │ | +/// │ Mul | │ Mul │ | │ | +/// └───┬───┘ └───┬───┘ │ │ │ +/// │ │ │ │ │ +/// | │ │ │ │ +/// ┌───┴────────────┴────────────┴─────────────┴─┐ | +/// │ ScaledDotProductAttention │──────────────────┘ +/// └────────────────────┬────────────────────────┘ +/// │ +/// │ +/// ┌────┴────┐ +/// │ Output │ +/// └─────────┘ +/// After: +/// ┌───────┐ ┌───────┐ ┌───────┐ ┌─────────────┐ ┌───────┐ +/// │ Q │ │ K │ │ V │ │AttentionMask│ │ Scale | +/// └───┬───┘ └───┬───┘ └───┬───┘ └──────┬──────┘ └───┬───┘ +/// │ │ │ │ | +/// │ │ │ │ | +/// | │ │ │ | +/// ┌───┴────────────┴────────────┴─────────────┴─┐ | +/// │ ScaledDotProductAttention │───────────┘ +/// └────────────────────┬────────────────────────┘ +/// │ +/// │ +/// ┌────┴────┐ +/// │ Output │ +/// └─────────┘ +/// Multiply ops for Q and K are eliminated in the following cases: +/// 1. Q_scale and K_scale are constant +/// 2. 
Q_scale * SDPA_Scale == 1 or K_scale * SDPA_Scale == 1 +class TRANSFORMATIONS_API SDPAScaleFusion : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("SDPAScaleFusion", "0"); + SDPAScaleFusion(); +}; + +} // namespace pass +} // namespace ov diff --git a/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp b/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp index 185ae84ec83642..23fbf882024bdc 100644 --- a/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp @@ -65,6 +65,7 @@ #include "transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp" #include "transformations/common_optimizations/reshape_sequence_fusion.hpp" #include "transformations/common_optimizations/ric_fusion.hpp" +#include "transformations/common_optimizations/sdpa_fusion.hpp" #include "transformations/common_optimizations/select_with_one_value_condition.hpp" #include "transformations/common_optimizations/sequence_fusion.hpp" #include "transformations/common_optimizations/shared_ops_optimization.hpp" @@ -229,6 +230,7 @@ bool ov::pass::MOCTransformations::run_on_model(const std::shared_ptr ADD_MATCHER(common_fusions, ConvertTensorIteratorToSequence) ADD_MATCHER(common_fusions, SplitConcatPairToInterpolateFusion, m_use_shapes) ADD_MATCHER(common_fusions, ConvolutionToGroupConvolutionFusion) + ADD_MATCHER(common_fusions, SDPAFusion) if (m_use_shapes) { ADD_MATCHER(common_fusions, NearestNeighborUpsamplingFusion) } diff --git a/src/common/transformations/src/transformations/common_optimizations/sdpa_fusion.cpp b/src/common/transformations/src/transformations/common_optimizations/sdpa_fusion.cpp new file mode 100644 index 00000000000000..fc581580f70001 --- /dev/null +++ 
b/src/common/transformations/src/transformations/common_optimizations/sdpa_fusion.cpp @@ -0,0 +1,127 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/common_optimizations/sdpa_fusion.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/core/type.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/scaled_dot_product_attention.hpp" +#include "openvino/op/softmax.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "openvino/pass/pattern/op/optional.hpp" +#include "openvino/pass/pattern/op/pattern.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/utils/gen_pattern.hpp" + +namespace ov { +namespace pass { + +SDPAFusion::SDPAFusion() { + using namespace ov::pass::pattern; + using namespace ov::gen_pattern; + + auto q = makePattern(ov::Rank(4)); + auto k = makePattern(ov::Rank(4)); + auto v = makePattern(ov::Rank(4)); + auto mask = makePattern(); + + auto k_transpose_order = pattern::wrap_type([](const Output& node) { + auto axis_order = + std::dynamic_pointer_cast(node.get_node_shared_ptr())->cast_vector(); + return axis_order == std::vector{0, 1, 3, 2}; + }); + + auto k_t = pattern::wrap_type({k, k_transpose_order}); + auto qk_nn = makePattern({q, k_t}, {{"transpose_a", false}, {"transpose_b", false}}); + auto qk_nt = makePattern({q, k}, {{"transpose_a", false}, {"transpose_b", true}}); + auto qk = qk_nt | qk_nn; + auto optional_add_mask = optional({qk, mask}); + auto softmax = makePattern({optional_add_mask}, {{"axis", "-1"}}); + auto qkv = makePattern({softmax, v}, {{"transpose_a", false}, {"transpose_b", false}}); + + auto valid_qk_shapes = [](const std::shared_ptr& qk_matmul) { + auto q_pshape = qk_matmul->get_input_partial_shape(0); + auto k_pshape = qk_matmul->get_input_partial_shape(1); + + const size_t q_head_size_idx = 3; + const 
size_t k_head_size_idx = qk_matmul->get_transpose_b() ? 3 : 2; + + return q_pshape.size() == 4 && k_pshape.size() == 4 && q_pshape[q_head_size_idx].is_static() && + k_pshape[k_head_size_idx].is_static() && + q_pshape[q_head_size_idx].get_length() == k_pshape[k_head_size_idx].get_length(); + }; + + ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + if (transformation_callback(m.get_match_root())) { + return false; + } + + auto q_node = pattern_map.at(q); + auto k_node = pattern_map.at(k); + auto v_node = pattern_map.at(v); + + if (!valid_qk_shapes(ov::as_type_ptr(pattern_map.at(qk).get_node_shared_ptr()))) { + return false; + } + + if (pattern_map.at(qk).get_target_inputs().size() > 1 || + pattern_map.at(softmax).get_target_inputs().size() > 1) { + return false; + } + if (pattern_map.count(optional_add_mask) && (pattern_map.at(optional_add_mask).get_target_inputs().size() > 1 || + pattern_map.at(mask).get_partial_shape().size() > 4)) { + return false; + } + + Output mask_value; + Output mask_input; + if (pattern_map.find(optional_add_mask) != pattern_map.end()) { + mask_value = pattern_map.at(mask); + } else { + mask_value = ov::op::v0::Constant::create(q_node.get_element_type(), ov::Shape{}, std::vector{0}); + } + + if (mask_value.get_partial_shape().size() > 4) { + return false; + } + + if (mask_value.get_partial_shape().rank() == 0 || mask_value.get_partial_shape().rank() == 4) { + mask_input = mask_value; + } else { + size_t rank_diff = q_node.get_partial_shape().size() - mask_value.get_partial_shape().size(); + std::vector axes(rank_diff); + std::iota(axes.begin(), axes.end(), 0); + mask_input = std::make_shared( + mask_value, + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{rank_diff}, axes)); + } + + std::shared_ptr scale_node = + ov::op::v0::Constant::create(q_node.get_element_type(), ov::Shape{}, std::vector{1.0f}); + + std::shared_ptr sdpa = std::make_shared(q_node, + 
k_node, + v_node, + mask_input, + scale_node, + false); + + sdpa->set_friendly_name(m.get_match_root()->get_friendly_name()); + ov::copy_runtime_info(m.get_matched_nodes(), sdpa); + ov::replace_node(m.get_match_root(), sdpa); + + return true; + }; + + auto m = std::make_shared(qkv, "SDPAFusion"); + this->register_matcher(m, callback); +} + +} // namespace pass +} // namespace ov diff --git a/src/common/transformations/src/transformations/common_optimizations/sdpa_scale_fusion.cpp b/src/common/transformations/src/transformations/common_optimizations/sdpa_scale_fusion.cpp new file mode 100644 index 00000000000000..3d750fe38a868e --- /dev/null +++ b/src/common/transformations/src/transformations/common_optimizations/sdpa_scale_fusion.cpp @@ -0,0 +1,140 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/common_optimizations/sdpa_scale_fusion.hpp" + +#include + +#include "openvino/core/node.hpp" +#include "openvino/core/rt_info.hpp" +#include "openvino/core/type.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/scaled_dot_product_attention.hpp" +#include "openvino/pass/pattern/op/optional.hpp" +#include "openvino/pass/pattern/op/pattern.hpp" +#include "transformations/utils/gen_pattern.hpp" + +namespace ov { +namespace pass { + +SDPAScaleFusion::SDPAScaleFusion() { + using namespace ov::pass::pattern; + using namespace ov::gen_pattern; + + auto q = makePattern(ov::Rank(4)); + auto k = makePattern(ov::Rank(4)); + auto v = makePattern(ov::Rank(4)); + auto mask = makePattern(); + auto sdpa_scale = makeConst({}); + auto scale_q = makePattern("[]") | makePattern("[1]"); + auto scale_k = makePattern("[]") | makePattern("[1]"); + + auto scaled_q = optional({q, scale_q}); + auto scaled_k = optional({k, scale_k}); + auto sdpa_mask_scale = + makePattern({scaled_q, scaled_k, v, mask, sdpa_scale}, + {{"causal", false}}); + auto sdpa_mask = + makePattern({scaled_q, scaled_k, v, mask}, {{"causal", 
false}}); + auto sdpa_simple = + makePattern({scaled_q, scaled_k, v}, {{"causal", false}}); + auto sdpa = sdpa_simple | sdpa_mask | sdpa_mask_scale; + + ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + if (transformation_callback(m.get_match_root())) { + return false; + } + + auto sdpa = m.get_match_root(); + + const bool has_q_scale = pattern_map.count(scaled_q); + const bool has_k_scale = pattern_map.count(scaled_k); + + // Nothing to do + if (!has_q_scale && !has_k_scale) + return false; + + auto prev_scale_value = 1.0f; + auto scale_q_value = 1.0f; + auto scale_k_value = 1.0f; + auto scale_et = sdpa->get_output_element_type(0); + + Output q_input = sdpa->get_input_source_output(0); + Output k_input = sdpa->get_input_source_output(1); + + std::shared_ptr scale_q_node = nullptr; + std::shared_ptr scale_k_node = nullptr; + + if (pattern_map.find(sdpa_scale) != pattern_map.end()) { + auto prev_scale_node = + ov::as_type_ptr(pattern_map.at(sdpa_scale).get_node_shared_ptr()); + prev_scale_value = prev_scale_node->cast_vector()[0]; + scale_et = prev_scale_node->get_output_element_type(0); + } else { + auto head_size = q_input.get_partial_shape()[3]; + if (head_size.is_dynamic()) + return false; + + prev_scale_value = 1.0f / std::sqrt(static_cast(head_size.get_length())); + } + + // Extract scalar scale values for Q and K if those are constant and set new inputs for SDPA + if (has_q_scale) { + scale_q_node = pattern_map.at(scale_q).get_node_shared_ptr(); + if (ov::is_type(scale_q_node)) { + scale_q_value = ov::as_type_ptr(scale_q_node)->cast_vector()[0]; + q_input = pattern_map.at(q); + } + } + if (has_k_scale) { + scale_k_node = pattern_map.at(scale_k).get_node_shared_ptr(); + if (ov::is_type(scale_k_node)) { + scale_k_value = ov::as_type_ptr(scale_k_node)->cast_vector()[0]; + k_input = pattern_map.at(k); + } + } + + Output new_scale_node; + auto new_scale_val = prev_scale_value * 
scale_q_value * scale_k_value; + + // If new scale is 1 and we have non-constant scale node for either Q or K, then we can make it a scale of SDPA + if (new_scale_val == 1.0f) { + if (has_q_scale && !ov::is_type(scale_q_node)) { + new_scale_node = pattern_map.at(scale_q); + q_input = pattern_map.at(q); + } else if (has_k_scale && !ov::is_type(scale_k_node)) { + new_scale_node = pattern_map.at(scale_k); + k_input = pattern_map.at(k); + } else { + new_scale_node = ov::op::v0::Constant::create(scale_et, ov::Shape{}, std::vector{new_scale_val}); + } + } else { + new_scale_node = ov::op::v0::Constant::create(scale_et, ov::Shape{}, std::vector{new_scale_val}); + } + + OutputVector new_inputs = {q_input, k_input, pattern_map.at(v)}; + if (pattern_map.find(mask) != pattern_map.end()) { + new_inputs.push_back(pattern_map.at(mask)); + } else { + new_inputs.push_back( + ov::op::v0::Constant::create(new_scale_node.get_element_type(), ov::Shape{}, std::vector{0.0f})); + } + + new_inputs.push_back(new_scale_node); + + auto new_sdpa = sdpa->clone_with_new_inputs(new_inputs); + new_sdpa->set_friendly_name(sdpa->get_friendly_name()); + ov::copy_runtime_info(sdpa, new_sdpa); + ov::replace_node(sdpa, new_sdpa); + + return true; + }; + + auto m = std::make_shared(sdpa, "SDPAScaleFusion"); + this->register_matcher(m, callback); +} + +} // namespace pass +} // namespace ov diff --git a/src/common/transformations/tests/common_optimizations/sdpa_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/sdpa_fusion_test.cpp new file mode 100644 index 00000000000000..52c10ba5967bd8 --- /dev/null +++ b/src/common/transformations/tests/common_optimizations/sdpa_fusion_test.cpp @@ -0,0 +1,234 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/ov_test_utils.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/softmax.hpp" +#include 
"openvino/op/transpose.hpp" + +using namespace testing; +using namespace ov::pass; +using namespace ov; + +TEST_F(TransformationTestsF, SDPAFusionTest1) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f32, query_shape); + const auto key = std::make_shared(element::f32, key_shape); + const auto value = std::make_shared(element::f32, value_shape); + const auto casual = false; + { + const auto qk = std::make_shared(query, key, false, true); + const auto softmax = std::make_shared(qk, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + { + const auto scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{1.0f}); + const auto mask_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{0.0f}); + const auto sdpa = std::make_shared(query, + key, + value, + mask_const, + scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAFusionTest2) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f16, query_shape); + const auto key = std::make_shared(element::f16, key_shape); + const auto value = std::make_shared(element::f16, value_shape); + const auto casual = false; + { + const auto qk = std::make_shared(query, key, false, true); + const auto softmax = std::make_shared(qk, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = 
std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + { + const auto scale_const = ov::op::v0::Constant::create(element::f16, ov::Shape{}, std::vector{1.0f}); + const auto mask_const = ov::op::v0::Constant::create(element::f16, ov::Shape{}, std::vector{0.0f}); + const auto sdpa = std::make_shared(query, + key, + value, + mask_const, + scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAFusionTest3) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f16, query_shape); + const auto key = std::make_shared(element::f16, key_shape); + const auto value = std::make_shared(element::f16, value_shape); + const auto casual = false; + { + const auto key_t = + std::make_shared(key, + op::v0::Constant::create(element::i64, Shape{4}, {0, 1, 3, 2})); + const auto qk = std::make_shared(query, key_t, false, false); + const auto softmax = std::make_shared(qk, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + { + const auto scale_const = ov::op::v0::Constant::create(element::f16, ov::Shape{}, std::vector{1.0f}); + const auto mask_const = ov::op::v0::Constant::create(element::f16, ov::Shape{}, std::vector{0.0f}); + const auto sdpa = std::make_shared(query, + key, + value, + mask_const, + scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + 
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAFusionTest4) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, 32, -1}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f16, query_shape); + const auto key = std::make_shared(element::f16, key_shape); + const auto value = std::make_shared(element::f16, value_shape); + { + const auto qk = std::make_shared(query, key, false, false); + const auto softmax = std::make_shared(qk, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + model_ref = model->clone(); + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAFusionTest5) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + const PartialShape attention_mask_shape{1, 32, -1, -1}; + + const auto query = std::make_shared(element::f16, query_shape); + const auto key = std::make_shared(element::f16, key_shape); + const auto value = std::make_shared(element::f16, value_shape); + const auto mask = std::make_shared(element::f16, attention_mask_shape); + const auto casual = false; + { + const auto qk = std::make_shared(query, key, false, true); + const auto mask_add = std::make_shared(qk, mask); + const auto softmax = std::make_shared(mask_add, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value, mask}); + manager.register_pass(); + } + + { + const auto scale_const = ov::op::v0::Constant::create(element::f16, ov::Shape{}, std::vector{1.0f}); + const auto sdpa = + std::make_shared(query, key, 
value, mask, scale_const, casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value, mask}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAFusionTest6) { + const PartialShape query_shape{1, 32, 10, 32}; + const PartialShape key_shape{1, 32, 10, 32}; + const PartialShape value_shape{1, 32, 10, 32}; + const PartialShape attention_mask_shape{1, 1, 10, 10}; + + const auto query = std::make_shared(element::f16, query_shape); + const auto key = std::make_shared(element::f16, key_shape); + const auto value = std::make_shared(element::f16, value_shape); + const auto mask = std::make_shared(element::f16, attention_mask_shape); + const auto casual = false; + { + const auto qk = std::make_shared(query, key, false, true); + const auto mask_add = std::make_shared(qk, mask); + const auto softmax = std::make_shared(mask_add, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value, mask}); + manager.register_pass(); + } + + { + const auto scale_const = ov::op::v0::Constant::create(element::f16, ov::Shape{}, std::vector{1.0f}); + const auto sdpa = + std::make_shared(query, key, value, mask, scale_const, casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value, mask}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAFusionTest7) { + const PartialShape query_shape{1, 8, -1, 32}; + const PartialShape key_shape{-1, 1, 8, 32}; + const PartialShape value_shape{1, 8, -1, 32}; + + const auto query = std::make_shared(element::f16, query_shape); + const auto key = std::make_shared(element::f16, key_shape); + const auto value = std::make_shared(element::f16, value_shape); + { 
+ const auto key_t = + std::make_shared(key, + op::v0::Constant::create(element::i64, Shape{4}, {1, 2, 3, 0})); + const auto qk = std::make_shared(query, key_t, false, false); + const auto softmax = std::make_shared(qk, -1); + const auto qkv = std::make_shared(softmax, value, false, false); + + model = std::make_shared(NodeVector{qkv}, ParameterVector{query, key, value}); + manager.register_pass(); + } +} diff --git a/src/common/transformations/tests/common_optimizations/sdpa_scale_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/sdpa_scale_fusion_test.cpp new file mode 100644 index 00000000000000..f922f030a9c43b --- /dev/null +++ b/src/common/transformations/tests/common_optimizations/sdpa_scale_fusion_test.cpp @@ -0,0 +1,228 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/ov_test_utils.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/scaled_dot_product_attention.hpp" + +using namespace testing; +using namespace ov::pass; +using namespace ov; + +TEST_F(TransformationTestsF, SDPAScaleFusionTest1) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f32, query_shape); + const auto key = std::make_shared(element::f32, key_shape); + const auto value = std::make_shared(element::f32, value_shape); + const auto scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{8.0f}); + const auto v_scaled = std::make_shared(value, scale_const); + const auto casual = false; + { + const auto q_scaled = std::make_shared(query, scale_const); + const auto k_scaled = std::make_shared(key, scale_const); + const auto sdpa = + std::make_shared(q_scaled, k_scaled, v_scaled, casual); + + model = 
std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + { + const auto new_mask_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{0.0f}); + const auto new_scale_const = + ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{64.0f / std::sqrt(32.0f)}); + const auto sdpa = std::make_shared(query, + key, + v_scaled, + new_mask_const, + new_scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAScaleFusionTest2) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f32, query_shape); + const auto key = std::make_shared(element::f32, key_shape); + const auto value = std::make_shared(element::f32, value_shape); + const auto sdpa_mask_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{0.0f}); + const auto sdpa_scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{2.0f}); + const auto scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{8.0f}); + const auto v_scaled = std::make_shared(value, scale_const); + const auto casual = false; + { + const auto q_scaled = std::make_shared(query, scale_const); + const auto k_scaled = std::make_shared(key, scale_const); + const auto sdpa = std::make_shared(q_scaled, + k_scaled, + v_scaled, + sdpa_mask_const, + sdpa_scale_const, + casual); + + model = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + { + const auto new_scale_const = + ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{128.0f}); + const auto sdpa = 
std::make_shared(query, + key, + v_scaled, + sdpa_mask_const, + new_scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAScaleFusionTest3) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f32, query_shape); + const auto key = std::make_shared(element::f32, key_shape); + const auto value = std::make_shared(element::f32, value_shape); + const auto sdpa_mask_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{0.0f}); + const auto sdpa_scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{2.0f}); + const auto scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{8.0f}); + const auto v_scaled = std::make_shared(value, scale_const); + const auto casual = false; + { + const auto q_scaled = std::make_shared(query, scale_const); + const auto sdpa = std::make_shared(q_scaled, + key, + v_scaled, + sdpa_mask_const, + sdpa_scale_const, + casual); + + model = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + manager.register_pass(); + } + + { + const auto new_scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{16.0f}); + const auto sdpa = std::make_shared(query, + key, + v_scaled, + sdpa_mask_const, + new_scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAScaleFusionTest4) { + const PartialShape query_shape{1, 32, -1, 32}; + const 
PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f32, query_shape); + const auto key = std::make_shared(element::f32, key_shape); + const auto value = std::make_shared(element::f32, value_shape); + const auto sdpa_mask_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{0.0f}); + const auto sdpa_scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{2.0f}); + const auto scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{8.0f}); + const auto scale_dyn = std::make_shared(element::f32, ov::Shape{}); + const auto v_scaled = std::make_shared(value, scale_const); + const auto casual = false; + const auto q_scaled = std::make_shared(query, scale_dyn); + { + const auto k_scaled = std::make_shared(key, scale_const); + const auto sdpa = std::make_shared(q_scaled, + k_scaled, + v_scaled, + sdpa_mask_const, + sdpa_scale_const, + casual); + + model = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value, scale_dyn}); + manager.register_pass(); + } + + { + const auto new_scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{16.0f}); + const auto sdpa = std::make_shared(q_scaled, + key, + v_scaled, + sdpa_mask_const, + new_scale_const, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value, scale_dyn}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, SDPAScaleFusionTest5) { + const PartialShape query_shape{1, 32, -1, 32}; + const PartialShape key_shape{1, 32, -1, 32}; + const PartialShape value_shape{1, 32, -1, 32}; + + const auto query = std::make_shared(element::f32, query_shape); + const auto key = std::make_shared(element::f32, key_shape); + const auto value = std::make_shared(element::f32, value_shape); + 
const auto sdpa_mask_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{0.0f}); + const auto sdpa_scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{1.0f}); + const auto scale_const = ov::op::v0::Constant::create(element::f32, ov::Shape{}, std::vector{1.0f}); + const auto scale_dyn = std::make_shared(element::f32, ov::Shape{}); + const auto v_scaled = std::make_shared(value, scale_const); + const auto casual = false; + { + const auto q_scaled = std::make_shared(query, scale_dyn); + const auto k_scaled = std::make_shared(key, scale_const); + const auto sdpa = std::make_shared(q_scaled, + k_scaled, + v_scaled, + sdpa_mask_const, + sdpa_scale_const, + casual); + + model = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value, scale_dyn}); + manager.register_pass(); + } + + { + const auto sdpa = std::make_shared(query, + key, + v_scaled, + sdpa_mask_const, + scale_dyn, + casual); + model_ref = std::make_shared(NodeVector{sdpa}, ParameterVector{query, key, value, scale_dyn}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index a63377312ecb95..fb9e0925bc89e2 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -37,6 +37,7 @@ #include "transformations/common_optimizations/nop_elimination.hpp" #include "transformations/common_optimizations/reshape_prelu.hpp" #include "transformations/common_optimizations/rms_fusion.hpp" +#include "transformations/common_optimizations/sdpa_fusion.hpp" #include "transformations/common_optimizations/transpose_sinking.hpp" #include "transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp" #include 
"transformations/common_optimizations/wrap_interpolate_into_transposes.hpp" @@ -695,6 +696,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis CPU_DISABLE_PASS_COMMON(manager, ov::pass::MatMulConstTransposesExtraction); CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertScatterNDUpdate15ToScatterNDUpdate3); CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertSliceScatter); + CPU_DISABLE_PASS_COMMON(manager, ov::pass::SDPAFusion); CPU_DISABLE_PASS_X64(manager, ov::pass::HSigmoidDecomposition); CPU_DISABLE_PASS_X64(manager, ov::pass::ReduceL1Decomposition); diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 53ab9aa188b7aa..7c7c09adcd182f 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -92,6 +92,7 @@ #include "transformations/common_optimizations/lstm_cell_fusion.hpp" #include "transformations/common_optimizations/move_eltwise_up_data_movement.hpp" #include "transformations/common_optimizations/mvn_fusion.hpp" +#include "transformations/common_optimizations/sdpa_scale_fusion.hpp" #include "transformations/common_optimizations/softmax_fusion.hpp" #include "transformations/common_optimizations/glu_fusion.hpp" #include "transformations/common_optimizations/transpose_sinking.hpp" @@ -941,6 +942,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { if (!disable_horizontal_fc_fusion) manager.register_pass(); + manager.register_pass(); manager.register_pass(); auto pass_config = manager.get_pass_config(); manager.register_pass();