From d81c97ffef4c0a14a8f51413e1f07b05e65766e5 Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Tue, 12 Mar 2024 21:15:28 +0100 Subject: [PATCH] [Transformations] Make ov::ModelPass transformations execute recursively (#23058) [Transformations] Make ov::ModelPass transformations execute recursively ### Details: Some ov::ModelPass transformations lack recursive execution for subgraphs, leaving them unprocessed. Add the required recursive call for MultiSubGraphOp operations. ### Tickets: Ticket: CVS-116659 Signed-off-by: Andrii Staikov Signed-off-by: Andrii Staikov --- .../low_precision/propagate_shared_value.hpp | 3 ++ .../snippets/src/pass/propagate_precision.cpp | 3 ++ .../include/transformations/utils/utils.hpp | 2 + .../fused_names_cleanup.cpp | 3 ++ .../optimize_strided_slice.cpp | 30 +++++-------- .../reverse_shape_and_type_infer.cpp | 3 ++ .../shared_ops_optimization.cpp | 9 ++-- .../control_flow/unroll_tensor_iterator.cpp | 2 + .../src/transformations/convert_precision.cpp | 42 ++++++++++--------- .../align_mixed_fp32_fp16_types.cpp | 4 +- .../src/transformations/init_node_info.cpp | 8 ++-- .../smart_reshape/lstm_states_broadcast.cpp | 16 ++++--- .../label_optimization.cpp | 6 +-- .../symbolic_optimizations.cpp | 6 +-- .../src/transformations/utils/utils.cpp | 15 +++++++ src/core/src/pass/constant_folding.cpp | 5 ++- src/core/src/pass/low_latency.cpp | 3 ++ .../snippets/x64/pass/enforce_precision.cpp | 3 ++ .../intel_cpu/src/utils/print_model.hpp | 7 +++- 19 files changed, 100 insertions(+), 70 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp index 305f63e34f7b63..64c6c2473e810a 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp @@ -44,6 +44,9 @@ class 
ov::pass::low_precision::PropagateSharedValue : public ov::pass::ModelPass std::vector> nodes(f->get_ordered_ops()); for (auto it = nodes.begin(); it != nodes.end(); it++) { const std::shared_ptr node = *it; + + ov::op::util::process_subgraph(*this, node); + if (ov::is_type(node)) { assert(node->get_output_size() == 1ul); auto& outputRtInfo = node->output(0).get_rt_info(); diff --git a/src/common/snippets/src/pass/propagate_precision.cpp b/src/common/snippets/src/pass/propagate_precision.cpp index 6660b5db6eb629..73e68733d541fd 100644 --- a/src/common/snippets/src/pass/propagate_precision.cpp +++ b/src/common/snippets/src/pass/propagate_precision.cpp @@ -8,6 +8,7 @@ #include "snippets/itt.hpp" #include "snippets/utils.hpp" #include "openvino/core/rt_info.hpp" +#include "transformations/utils/utils.hpp" #include #include @@ -29,6 +30,8 @@ bool ov::snippets::pass::PropagatePrecision::run_on_model(const std::shared_ptr< bool was_updated = false; for (const auto& op : f->get_ordered_ops()) { + ov::op::util::process_subgraph(*this, op); + auto type_info = op->get_type_info(); auto exec = target_machine->get_supported_precisions(type_info); const auto& supported_precisions = exec(op); diff --git a/src/common/transformations/include/transformations/utils/utils.hpp b/src/common/transformations/include/transformations/utils/utils.hpp index f1e270823ba1f4..e60243b60e6028 100644 --- a/src/common/transformations/include/transformations/utils/utils.hpp +++ b/src/common/transformations/include/transformations/utils/utils.hpp @@ -276,6 +276,8 @@ TRANSFORMATIONS_API bool is_constant_and_all_values_equal_int(const Output TRANSFORMATIONS_API bool is_on_constant_path(const ov::Output& output); +TRANSFORMATIONS_API bool process_subgraph(ov::pass::ModelPass& model_pass, const std::shared_ptr& node); + template ov::pass::pattern::op::ValuePredicate constant_predicate(std::function&)> predicate) { return pass::pattern::op::as_value_predicate([=](std::shared_ptr n) -> bool { diff --git 
a/src/common/transformations/src/transformations/common_optimizations/fused_names_cleanup.cpp b/src/common/transformations/src/transformations/common_optimizations/fused_names_cleanup.cpp index c3f602ebcacd91..6dc3e68dfc3cb4 100644 --- a/src/common/transformations/src/transformations/common_optimizations/fused_names_cleanup.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/fused_names_cleanup.cpp @@ -6,11 +6,14 @@ #include "openvino/cc/pass/itt.hpp" #include "transformations/rt_info/fused_names_attribute.hpp" +#include "transformations/utils/utils.hpp" bool ov::pass::FusedNamesCleanup::run_on_model(const std::shared_ptr& f) { RUN_ON_FUNCTION_SCOPE(FusedNamesCleanup); for (auto& node : f->get_ordered_ops()) { + ov::op::util::process_subgraph(*this, node); + RTMap& rt_info = node->get_rt_info(); auto it = rt_info.find(ov::FusedNames::get_type_info_static()); if (it != rt_info.end()) { diff --git a/src/common/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp b/src/common/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp index a7599a96fd277e..253004f694113e 100644 --- a/src/common/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp @@ -19,6 +19,7 @@ #include "openvino/pass/manager.hpp" #include "transformations/common_optimizations/shared_ops_optimization.hpp" #include "transformations/op_conversions/convert_slice_to_strided_slice.hpp" +#include "transformations/utils/utils.hpp" using namespace ov; @@ -27,11 +28,8 @@ bool ov::pass::UselessSliceEraser::run_on_model(const std::shared_ptr bool rewritten = false; for (auto& node : f->get_ordered_ops()) { // Recursively apply transformation for sub-graph based operations - if (auto sub_graph_node = std::dynamic_pointer_cast(node)) { - if (auto sub_graph = sub_graph_node->get_function()) { - 
rewritten |= run_on_model(sub_graph); - } - } + rewritten = ov::op::util::process_subgraph(*this, node) || rewritten; + bool is_slice = ov::is_type(node) || ov::is_type(node); if (!is_slice || node->get_output_partial_shape(0).is_dynamic() || node->get_input_partial_shape(0).is_dynamic()) @@ -45,7 +43,7 @@ bool ov::pass::UselessSliceEraser::run_on_model(const std::shared_ptr if (!std::any_of(strides.begin(), strides.end(), [](int64_t strd) { return strd < 0; })) { - rewritten |= replace_output_update_name(node->output(0), node->input_value(0)); + rewritten = replace_output_update_name(node->output(0), node->input_value(0)) || rewritten; } } } @@ -102,11 +100,8 @@ bool ov::pass::GroupedStridedSliceOptimizer::run_on_model(const std::shared_ptr< std::map, std::vector> source_to_ss_with_plan; for (const auto& node : f->get_ordered_ops()) { // Recursively apply transformation for sub-graph based operations - if (auto sub_graph_node = std::dynamic_pointer_cast(node)) { - if (auto sub_graph = sub_graph_node->get_function()) { - graph_rewritten |= run_on_model(sub_graph); - } - } + graph_rewritten = ov::op::util::process_subgraph(*this, node) || graph_rewritten; + if (auto ss = std::dynamic_pointer_cast(node)) { auto slice_plan = get_slice_plan(ss); if (slice_plan == op::util::SlicePlan()) @@ -291,12 +286,8 @@ bool ov::pass::GroupedSliceToVSplitOptimization::run_on_model(const std::shared_ std::vector ordered_outputs; for (const auto& node : model->get_ordered_ops()) { // Recursively apply transformation for sub-graph based operations - if (auto multi_subgraph_op = std::dynamic_pointer_cast(node)) { - for (const auto& sub_graph : multi_subgraph_op->get_functions()) { - if (sub_graph) - graph_rewritten |= run_on_model(sub_graph); - } - } + graph_rewritten = ov::op::util::process_subgraph(*this, node) || graph_rewritten; + if (auto op = ov::as_type_ptr(node)) { SliceAttrs attributes{}; if (slice_is_suitable_for_optimization(op, attributes)) { @@ -365,8 +356,9 @@ bool 
ov::pass::GroupedSliceToVSplitOptimization::run_on_model(const std::shared_ auto i = 0; for (auto& slice_with_attrs : attributes) { - graph_rewritten |= - ov::replace_output_update_name(slice_with_attrs.slice->output(0), variadic_split->output(i)); + graph_rewritten = + ov::replace_output_update_name(slice_with_attrs.slice->output(0), variadic_split->output(i)) || + graph_rewritten; ov::copy_runtime_info(slice_with_attrs.slice, variadic_split); ++i; } diff --git a/src/common/transformations/src/transformations/common_optimizations/reverse_shape_and_type_infer.cpp b/src/common/transformations/src/transformations/common_optimizations/reverse_shape_and_type_infer.cpp index b8224184b28731..e277efb5fc9580 100644 --- a/src/common/transformations/src/transformations/common_optimizations/reverse_shape_and_type_infer.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/reverse_shape_and_type_infer.cpp @@ -21,6 +21,7 @@ #include "openvino/op/util/binary_elementwise_arithmetic.hpp" #include "openvino/op/util/pad_base.hpp" #include "openvino/op/util/unary_elementwise_arithmetic.hpp" +#include "transformations/utils/utils.hpp" bool ov::pass::ReverseShapeAndTypeInfer::inherit_output_shape(const std::shared_ptr& node, const std::vector& input_idxs) { @@ -70,6 +71,8 @@ bool ov::pass::ReverseShapeAndTypeInfer::run_on_model(const std::shared_ptrget_ordered_ops(); for (auto it = ops.rbegin(); it != ops.rend(); ++it) { const auto& op = *it; + is_changed = ov::op::util::process_subgraph(*this, op) || is_changed; + auto output_shape = op->get_output_partial_shape(0); auto output_type = op->get_output_element_type(0); if (const auto& param = std::dynamic_pointer_cast(op)) { diff --git a/src/common/transformations/src/transformations/common_optimizations/shared_ops_optimization.cpp b/src/common/transformations/src/transformations/common_optimizations/shared_ops_optimization.cpp index 55a61af60a4dab..320e1b49468255 100644 --- 
a/src/common/transformations/src/transformations/common_optimizations/shared_ops_optimization.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/shared_ops_optimization.cpp @@ -109,7 +109,7 @@ bool shared_node_optimization(const shared_ptr& model) { if (auto multi_subgraph_op = dynamic_pointer_cast(op)) { for (const auto& sub_graph : multi_subgraph_op->get_functions()) { if (sub_graph) - rewritten |= shared_node_optimization(sub_graph); + rewritten = shared_node_optimization(sub_graph) || rewritten; } } for (auto& output : op->outputs()) { @@ -136,7 +136,8 @@ bool shared_node_optimization(const shared_ptr& model) { continue; const auto& child_op = shared_nodes[j]; if (nodes_are_equal(root_op, child_op)) { - rewritten |= replace_output_update_name(child_op->output(0), root_op->output(0)); + rewritten = + replace_output_update_name(child_op->output(0), root_op->output(0)) || rewritten; visited_nodes[j] = true; } } @@ -154,7 +155,7 @@ bool shape_of_upgrade(const shared_ptr& model) { if (auto multi_subgraph_op = dynamic_pointer_cast(op)) { for (const auto& sub_graph : multi_subgraph_op->get_functions()) { if (sub_graph) - rewritten |= shape_of_upgrade(sub_graph); + rewritten = shape_of_upgrade(sub_graph) || rewritten; } } else if (auto v1_shape_of = ov::as_type_ptr(op)) { auto v3_shape_of = std::make_shared(v1_shape_of->input_value(0), element::i64); @@ -171,6 +172,6 @@ bool pass::SharedOpOptimization::run_on_model(const shared_ptr& model) { RUN_ON_FUNCTION_SCOPE(SharedOpOptimization); bool rewritten = shape_of_upgrade(model); - rewritten |= shared_node_optimization(model); + rewritten = shared_node_optimization(model) || rewritten; return rewritten; } diff --git a/src/common/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp b/src/common/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp index dbddf574292998..099623fc9894c4 100644 --- 
a/src/common/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp +++ b/src/common/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp @@ -23,6 +23,8 @@ bool ov::pass::UnrollTensorIterator::run_on_model(const std::shared_ptr& f) { RUN_ON_FUNCTION_SCOPE(UnrollTensorIterator); for (const auto& op : f->get_ops()) { + ov::op::util::process_subgraph(*this, op); + auto sub_graph_op = std::dynamic_pointer_cast(op); if (!sub_graph_op || transformation_callback(sub_graph_op)) { continue; diff --git a/src/common/transformations/src/transformations/convert_precision.cpp b/src/common/transformations/src/transformations/convert_precision.cpp index 8d67b73aeb9f2d..e3f149f958e6b3 100644 --- a/src/common/transformations/src/transformations/convert_precision.cpp +++ b/src/common/transformations/src/transformations/convert_precision.cpp @@ -231,18 +231,18 @@ bool convert_function_precision(const std::shared_ptr& f, for (auto& node : ops) { if (skip_precision_sensitive && fp16_compression_is_disabled(node) && has_fp16_compression) continue; - is_changed |= convert_node_input_precision(node, precisions, type_to_extend); + is_changed = convert_node_input_precision(node, precisions, type_to_extend) || is_changed; } for (const auto& param : f->get_parameters()) { if (skip_precision_sensitive && fp16_compression_is_disabled(param) && has_fp16_compression) continue; - is_changed |= fuse_type_to_parameter(param, precisions, convert_input_output_precision); + is_changed = fuse_type_to_parameter(param, precisions, convert_input_output_precision) || is_changed; } if (convert_input_output_precision || store_original_precision_as_rt_attribute) { for (const auto& variable : f->get_variables()) { - is_changed |= fuse_type_to_variable(variable, precisions); + is_changed = fuse_type_to_variable(variable, precisions) || is_changed; } } @@ -272,17 +272,18 @@ bool convert_function_precision(const std::shared_ptr& f, if (auto sub_graph_node = 
std::dynamic_pointer_cast(node)) { size_t sub_graphs_num = sub_graph_node->get_internal_subgraphs_size(); for (size_t sub_graph_ind = 0; sub_graph_ind < sub_graphs_num; ++sub_graph_ind) { - is_changed |= convert_function_precision(sub_graph_node->get_function(static_cast(sub_graph_ind)), - type_to_fuse, - type_to_extend, - precisions, - const_to_internal_output, - has_fp16_compression, - skip_precision_sensitive, - is_changed || is_output_precision_changed, - true, - true, - store_original_precision_as_rt_attribute); + is_changed = convert_function_precision(sub_graph_node->get_function(static_cast(sub_graph_ind)), + type_to_fuse, + type_to_extend, + precisions, + const_to_internal_output, + has_fp16_compression, + skip_precision_sensitive, + is_changed || is_output_precision_changed, + true, + true, + store_original_precision_as_rt_attribute) || + is_changed; } } // if convert_input_output_precision flag is set, we don't need to preserve the original precision @@ -293,16 +294,17 @@ bool convert_function_precision(const std::shared_ptr& f, node->revalidate_and_infer_types(); continue; } - is_output_precision_changed |= convert_node_output_precision(node, - precisions, - type_to_fuse, - const_to_internal_output, - is_changed || is_output_precision_changed); + is_output_precision_changed = convert_node_output_precision(node, + precisions, + type_to_fuse, + const_to_internal_output, + is_changed || is_output_precision_changed) || + is_output_precision_changed; } if (is_output_precision_changed) { ops = f->get_ordered_ops(); - is_changed |= is_output_precision_changed; + is_changed = is_output_precision_changed || is_changed; } if (!is_subgraph) { diff --git a/src/common/transformations/src/transformations/fp16_compression/align_mixed_fp32_fp16_types.cpp b/src/common/transformations/src/transformations/fp16_compression/align_mixed_fp32_fp16_types.cpp index 990f85fc6eea80..5281f68f39343a 100644 --- 
a/src/common/transformations/src/transformations/fp16_compression/align_mixed_fp32_fp16_types.cpp +++ b/src/common/transformations/src/transformations/fp16_compression/align_mixed_fp32_fp16_types.cpp @@ -88,8 +88,8 @@ bool ov::pass::AlignMixedFP32FP16Types::run_on_model(const std::shared_ptr& f) { RUN_ON_FUNCTION_SCOPE(InitNodeInfo); for (auto& node : f->get_ops()) { // Recursively apply transformation for sub-graph based operations - if (auto sub_graph_node = std::dynamic_pointer_cast(node)) { - if (auto sub_graph = sub_graph_node->get_function()) { - run_on_model(sub_graph); - } - } + ov::op::util::process_subgraph(*this, node); + auto& rtInfo = node->get_rt_info(); rtInfo.emplace(FusedNames::get_type_info_static(), FusedNames{node->get_friendly_name()}); } diff --git a/src/common/transformations/src/transformations/smart_reshape/lstm_states_broadcast.cpp b/src/common/transformations/src/transformations/smart_reshape/lstm_states_broadcast.cpp index 0a2340d4620356..28b65cf9add1a6 100644 --- a/src/common/transformations/src/transformations/smart_reshape/lstm_states_broadcast.cpp +++ b/src/common/transformations/src/transformations/smart_reshape/lstm_states_broadcast.cpp @@ -135,11 +135,11 @@ bool relax_batch_for_initial_states_of_lstm_in_ti(const shared_ptr(lstm_cell->get_input_node_shared_ptr(1))) { auto outer_init_hidden_state_input = get_outer_input_of_ti_by_parameter(init_hidden_state, ti); - rewritten |= broadcast_state_by_batch(outer_init_hidden_state_input, batch_delivering_node); + rewritten = broadcast_state_by_batch(outer_init_hidden_state_input, batch_delivering_node) || rewritten; } if (auto init_cell_state = dynamic_pointer_cast(lstm_cell->get_input_node_shared_ptr(2))) { auto outer_init_cell_state_input = get_outer_input_of_ti_by_parameter(init_cell_state, ti); - rewritten |= broadcast_state_by_batch(outer_init_cell_state_input, batch_delivering_node); + rewritten = broadcast_state_by_batch(outer_init_cell_state_input, batch_delivering_node) || 
rewritten; } return rewritten; } @@ -151,8 +151,8 @@ bool relax_batch_for_initial_states_of_lstm(const shared_ptr(batched_shape, ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0}), ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0})); - rewritten |= broadcast_state_by_batch(lstm_cell->input(1), batch_delivering_node); - rewritten |= broadcast_state_by_batch(lstm_cell->input(2), batch_delivering_node); + rewritten = broadcast_state_by_batch(lstm_cell->input(1), batch_delivering_node) || rewritten; + rewritten = broadcast_state_by_batch(lstm_cell->input(2), batch_delivering_node) || rewritten; return rewritten; } @@ -163,13 +163,11 @@ bool ov::pass::LSTMStatesBroadcast::run_on_model(const shared_ptr& f) bool rewritten = false; for (auto& node : f->get_ordered_ops()) { // Recursively apply transformation for sub-graph based operations - if (const auto& sub_graph_node = dynamic_pointer_cast(node)) - if (const auto& sub_graph = sub_graph_node->get_function()) - rewritten |= run_on_model(sub_graph); + rewritten = ov::op::util::process_subgraph(*this, node) || rewritten; // Case without TI (LSTMCell and Constant are in the same ov::Model) if (const auto& lstm_cell = dynamic_pointer_cast(node)) - rewritten |= relax_batch_for_initial_states_of_lstm(lstm_cell); + rewritten = relax_batch_for_initial_states_of_lstm(lstm_cell) || rewritten; // Case with TI (LSTMCell and Constant are in different ov::Model objects) if (auto ti = dynamic_pointer_cast(node)) { @@ -178,7 +176,7 @@ bool ov::pass::LSTMStatesBroadcast::run_on_model(const shared_ptr& f) continue; for (const auto& body_node : body->get_ordered_ops()) if (const auto& lstm_cell = dynamic_pointer_cast(body_node)) - rewritten |= relax_batch_for_initial_states_of_lstm_in_ti(ti, lstm_cell); + rewritten = relax_batch_for_initial_states_of_lstm_in_ti(ti, lstm_cell) || rewritten; } } return rewritten; diff --git 
a/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp b/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp index 06a046c3bfa5f2..5a8cbf2cf87ae3 100644 --- a/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp +++ b/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp @@ -17,6 +17,7 @@ #include "openvino/op/squeeze.hpp" #include "openvino/op/util/multi_subgraph_base.hpp" #include "openvino/op/util/symbolic_info.hpp" +#include "transformations/utils/utils.hpp" namespace { void update_label(const ov::EqTable& table, ov::label_t& label) { @@ -250,10 +251,7 @@ bool ov::pass::OptimizeLabelsUsedAsValues::run_on_model(const std::shared_ptr(op)) - for (const auto& sub_graph : multi_subgraph_op->get_functions()) - if (sub_graph) - run_on_model(sub_graph); + ov::op::util::process_subgraph(*this, op); for (auto& output : op->outputs()) { optimize_value_usage(output, label_shape_source, label_value_source); diff --git a/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp b/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp index 1676c7206fad95..31b3ac6dedc1a9 100644 --- a/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp +++ b/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp @@ -25,6 +25,7 @@ #include "transformations/symbolic_transformations/nop_broadcast.hpp" #include "transformations/symbolic_transformations/reshape_optimizations.hpp" #include "transformations/symbolic_transformations/utils.hpp" +#include "transformations/utils/utils.hpp" using namespace ov::pass; using namespace ov::symbol::util; @@ -106,10 +107,7 @@ bool ov::pass::SymbolicPropagation::run_on_model(const std::shared_ptrrevalidate_and_infer_types(); 
// Recursively apply transformation for sub-graph based operations - if (auto multi_subgraph_op = std::dynamic_pointer_cast(op)) - for (const auto& sub_graph : multi_subgraph_op->get_functions()) - if (sub_graph) - run_on_model(sub_graph); + ov::op::util::process_subgraph(*this, op); // additional label propagation rules must be triggered here special_case_range_label_propagation(op); diff --git a/src/common/transformations/src/transformations/utils/utils.cpp b/src/common/transformations/src/transformations/utils/utils.cpp index 60cfdd744050d9..a5deb7daffb95f 100644 --- a/src/common/transformations/src/transformations/utils/utils.cpp +++ b/src/common/transformations/src/transformations/utils/utils.cpp @@ -14,6 +14,7 @@ #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/reshape.hpp" +#include "openvino/op/util/multi_subgraph_base.hpp" #include "openvino/opsets/opset1.hpp" #include "openvino/opsets/opset3.hpp" @@ -456,6 +457,20 @@ bool is_on_constant_path(const ov::Output& output) { return status; } +bool process_subgraph(ov::pass::ModelPass& model_pass, const std::shared_ptr& node) { + bool changed = false; + + if (const auto& multi_subgraph_op = std::dynamic_pointer_cast(node)) { + for (const auto& sub_graph : multi_subgraph_op->get_functions()) { + if (sub_graph) { + changed = model_pass.run_on_model(sub_graph) || changed; + } + } + } + + return changed; +} + } // namespace util } // namespace op } // namespace ov diff --git a/src/core/src/pass/constant_folding.cpp b/src/core/src/pass/constant_folding.cpp index 3e93d0da979258..291c91e11b1354 100644 --- a/src/core/src/pass/constant_folding.cpp +++ b/src/core/src/pass/constant_folding.cpp @@ -101,7 +101,7 @@ bool ov::pass::ConstantFolding::run_on_model(const std::shared_ptr& m remove_requires_precision_conversion_attribute(node); node = util::convert_to_supported_precision(node.get()); } else { - rewritten |= restore_original_input_precision(node); + rewritten = 
restore_original_input_precision(node) || rewritten; } if (rewritten) { @@ -139,7 +139,8 @@ bool ov::pass::ConstantFolding::run_on_model(const std::shared_ptr& m // recursively constant fold operators containing subgraphs (ie: TensorIterator, Loop) size_t sub_graphs_num = sub_graph_node->get_internal_subgraphs_size(); for (size_t sub_graph_ind = 0; sub_graph_ind < sub_graphs_num; ++sub_graph_ind) { - rewritten |= run_on_model(sub_graph_node->get_function(static_cast(sub_graph_ind))); + rewritten = + run_on_model(sub_graph_node->get_function(static_cast(sub_graph_ind))) || rewritten; } } diff --git a/src/core/src/pass/low_latency.cpp b/src/core/src/pass/low_latency.cpp index 13a793a10d4de3..f6111c2617924d 100644 --- a/src/core/src/pass/low_latency.cpp +++ b/src/core/src/pass/low_latency.cpp @@ -13,6 +13,7 @@ #include "openvino/opsets/opset9.hpp" #include "openvino/pass/graph_rewrite.hpp" #include "openvino/util/log.hpp" +#include "transformations/utils/utils.hpp" namespace { std::string generate_variable_name(const std::string& op_name, const std::string& param_name, int64_t variable_idx) { @@ -259,6 +260,8 @@ bool ov::pass::LowLatency2::run_on_model(const std::shared_ptr& f) { ov::SinkVector assigns; for (const auto& op : f->get_ordered_ops()) { + ov::op::util::process_subgraph(*this, op); + if (const auto& sub_graph_op = std::dynamic_pointer_cast(op)) { int64_t variable_id = 0; const auto& func = sub_graph_op->get_function(); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp index 064db31ed49bef..b90b35f9359aa4 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp @@ -11,6 +11,7 @@ #include "openvino/core/rt_info.hpp" #include "snippets/pass/propagate_precision.hpp" #include "cpu/x64/cpu_isa_traits.hpp" 
+#include "transformations/utils/utils.hpp" using namespace ov::intel_cpu::pass; @@ -30,6 +31,8 @@ bool EnforcePrecision::run_on_model(const std::shared_ptr& f) { bool was_updated = false; for (const auto& op : f->get_ordered_ops()) { + ov::op::util::process_subgraph(*this, op); + const auto& precisions = get_supported_precisions(op); if (precisions.empty()) { diff --git a/src/plugins/intel_cpu/src/utils/print_model.hpp b/src/plugins/intel_cpu/src/utils/print_model.hpp index 4f15a136f5d54c..6f89fc197706b8 100644 --- a/src/plugins/intel_cpu/src/utils/print_model.hpp +++ b/src/plugins/intel_cpu/src/utils/print_model.hpp @@ -20,6 +20,7 @@ #include "openvino/core/node.hpp" #include "openvino/op/constant.hpp" #include "openvino/pass/pass.hpp" +#include "transformations/utils/utils.hpp" namespace ov { namespace pass { @@ -402,6 +403,10 @@ class OPENVINO_API PrintModel : public ov::pass::ModelPass { if (m_file_name.empty()) return false; + for (auto& node : model->get_ordered_ops()) { + ov::op::util::process_subgraph(*this, node); + } + std::ofstream ofs(m_file_name); if (!ofs) { // OPENVINO_WARN << "Error opening file " << m_file_name << " for output" << std::endl; @@ -416,4 +421,4 @@ class OPENVINO_API PrintModel : public ov::pass::ModelPass { std::string m_file_name; }; } // namespace pass -} // namespace ov \ No newline at end of file +} // namespace ov