diff --git a/src/frontends/paddle/src/decoder_proto.cpp b/src/frontends/paddle/src/decoder_proto.cpp index 6ddca6572ff5e9..f67b7575c96bdd 100644 --- a/src/frontends/paddle/src/decoder_proto.cpp +++ b/src/frontends/paddle/src/decoder_proto.cpp @@ -70,6 +70,37 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { return attrs[0].block_idx(); case proto::AttrType::BLOCKS: return std::vector(attrs[0].blocks_idx().begin(), attrs[0].blocks_idx().end()); + case proto::AttrType::SCALARS: { + auto scalars_size = attrs[0].scalars_size(); + if (scalars_size >= 1) { + if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "LONG") { + std::vector res; + res.reserve(scalars_size); + for (int i = 0; i < scalars_size; ++i) { + res.push_back(attrs[0].scalars(i).i()); + } + return res; + } else if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "FLOAT64") { + std::vector res; + res.reserve(scalars_size); + for (int i = 0; i < scalars_size; ++i) { + res.push_back(attrs[0].scalars(i).r()); + } + return res; + } else if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "BOOLEAN") { + std::vector res; + res.reserve(scalars_size); + for (int i = 0; i < scalars_size; ++i) { + res.push_back(attrs[0].scalars(i).b()); + } + return res; + } + } else { + FRONT_END_GENERAL_CHECK(false, + "Conversion from PaddlePaddle to OpenVINO is not supported 0 dims in SCALARS."); + break; + } + } default: FRONT_END_GENERAL_CHECK(false, "Conversion from PaddlePaddle to OpenVINO data type is not supported."); } diff --git a/src/frontends/paddle/src/op/abs.cpp b/src/frontends/paddle/src/op/abs.cpp new file mode 100644 index 00000000000000..a2f2b35816e4f5 --- /dev/null +++ b/src/frontends/paddle/src/op/abs.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/opsets/opset6.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs 
abs(const NodeContext& node) { + auto data = node.get_input("X"); + return node.default_single_output_mapping({std::make_shared(data)}, {"Out"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/argmax.cpp b/src/frontends/paddle/src/op/argmax.cpp index d04424dd1e7f9e..6cc856d1b30fa8 100644 --- a/src/frontends/paddle/src/op/argmax.cpp +++ b/src/frontends/paddle/src/op/argmax.cpp @@ -12,31 +12,29 @@ namespace op { NamedOutputs argmax(const NodeContext& node) { auto data = node.get_input("X"); bool flatten = node.get_attribute("flatten"); - const element::Type& index_element_type = element::i64; - const Output k = ov::opset6::Constant::create(ov::element::i64, {}, {1}); + auto dtype = node.get_attribute("dtype"); + const Output k = ov::opset6::Constant::create(dtype, {}, {1}); if (!flatten) { auto axis = node.get_attribute("axis"); const auto axis_to_remove = ov::opset6::Constant::create(element::u64, Shape{}, {axis}); - auto node_topk = std::make_shared(data, k, axis, "max", "index", index_element_type); + auto node_topk = std::make_shared(data, k, axis, "max", "index", dtype); const auto reshaped_indices = std::make_shared(node_topk->output(1), axis_to_remove); - return node.default_single_output_mapping( - {std::make_shared(reshaped_indices, element::i64)}, - {"Out"}); + return node.default_single_output_mapping({std::make_shared(reshaped_indices, dtype)}, + {"Out"}); } else { int64_t axis = 0; const Output reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1}); auto node_reshape = std::make_shared(data, reshape_flatten, true); - auto node_topk = std::make_shared(node_reshape, k, axis, "max", "index", index_element_type); + auto node_topk = std::make_shared(node_reshape, k, axis, "max", "index", dtype); const auto output_info = node.get_output_port_infos("Out"); size_t output_size = output_info[0].second.size(); if (output_size == 0) { auto out = 
std::make_shared(node_topk->output(1)); - return node.default_single_output_mapping({std::make_shared(out, element::i64)}, - {"Out"}); + return node.default_single_output_mapping({std::make_shared(out, dtype)}, {"Out"}); } else { return node.default_single_output_mapping( - {std::make_shared(node_topk->output(1), element::i64)}, + {std::make_shared(node_topk->output(1), dtype)}, {"Out"}); } } diff --git a/src/frontends/paddle/src/op/argmin.cpp b/src/frontends/paddle/src/op/argmin.cpp new file mode 100644 index 00000000000000..f454e476cb4e7d --- /dev/null +++ b/src/frontends/paddle/src/op/argmin.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/opsets/opset6.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs argmin(const NodeContext& node) { + auto data = node.get_input("X"); + bool flatten = node.get_attribute("flatten"); + auto dtype = node.get_attribute("dtype"); + const Output k = ov::opset6::Constant::create(ov::element::i64, {}, {1}); + + if (!flatten) { + auto axis = node.get_attribute("axis"); + const auto axis_to_remove = ov::opset6::Constant::create(element::u64, Shape{}, {axis}); + auto node_topk = std::make_shared(data, k, axis, "min", "index", dtype); + const auto reshaped_indices = std::make_shared(node_topk->output(1), axis_to_remove); + return node.default_single_output_mapping({std::make_shared(reshaped_indices, dtype)}, + {"Out"}); + } else { + int64_t axis = 0; + const Output reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1}); + auto node_reshape = std::make_shared(data, reshape_flatten, true); + auto node_topk = std::make_shared(node_reshape, k, axis, "min", "index", dtype); + const auto output_info = node.get_output_port_infos("Out"); + size_t output_size = output_info[0].second.size(); + if (output_size == 0) { + auto out = 
std::make_shared(node_topk->output(1)); + return node.default_single_output_mapping({std::make_shared(out, dtype)}, {"Out"}); + } else { + return node.default_single_output_mapping( + {std::make_shared(node_topk->output(1), dtype)}, + {"Out"}); + } + } +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/assign_value.cpp b/src/frontends/paddle/src/op/assign_value.cpp index 7ca9a1a6fc364e..eab619bae56d00 100644 --- a/src/frontends/paddle/src/op/assign_value.cpp +++ b/src/frontends/paddle/src/op/assign_value.cpp @@ -15,12 +15,29 @@ NamedOutputs assign_value(const NodeContext& node) { switch (dtype) { case element::i32: { - auto values = node.get_attribute>("int32_values"); - const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + if (node.has_attribute("int32_values")) { + auto values = node.get_attribute>("int32_values"); + const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + } else { + auto values = node.get_attribute>("values"); + auto int32_values = std::vector(values.begin(), values.end()); + const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, int32_values)}; + } break; } case element::f32: { - std::vector values = node.get_attribute>("fp32_values"); + if (node.has_attribute("fp32_values")) { + std::vector values = node.get_attribute>("fp32_values"); + const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + } else { + auto values = node.get_attribute>("values"); + auto values_f32 = std::vector(values.begin(), values.end()); + const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values_f32)}; + } + break; + } + case element::f64: { + auto values = node.get_attribute>("values"); const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; break; } @@ -30,12 +47,16 @@ NamedOutputs 
assign_value(const NodeContext& node) { break; } case element::i64: { - auto values = node.get_attribute>("int64_values"); + auto values = node.has_attribute("int64_values") ? node.get_attribute>("int64_values") + : node.get_attribute>("values"); const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; break; } default: { - PADDLE_OP_CHECK(node, false, "assign_value only supports int32, int64, float32, bool"); + std::ostringstream oss; + oss << "assign_value only supports int32, int64, float32, float64, bool, but receive dtype[" + << dtype.get_type_name() << "]"; + PADDLE_OP_CHECK(node, false, oss.str()); break; } } diff --git a/src/frontends/paddle/src/op/atan2.cpp b/src/frontends/paddle/src/op/atan2.cpp new file mode 100644 index 00000000000000..c79e0c5876679b --- /dev/null +++ b/src/frontends/paddle/src/op/atan2.cpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/atan.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/greater_eq.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/opsets/opset6.hpp" +using namespace std; +using namespace ov::op; + +namespace ov { +namespace frontend { +namespace paddle { + +template +ov::Output create_same_type_const_scalar(const ov::Output& same_type_output, const T& value) { + if (same_type_output.get_element_type().is_static()) { + return std::make_shared(same_type_output.get_element_type(), ov::Shape{}, value); + } else { + ov::Output const_res = + std::make_shared(ov::element::from(), ov::Shape{}, value); + const_res 
= std::make_shared(const_res, same_type_output); + return const_res; + } +} + +namespace op { +NamedOutputs atan2(const NodeContext& node) { + // default_op_checks(node, 2, {"Atan2"}); + auto y = node.get_input("X1"); + auto x = node.get_input("X2"); + + // handle the first condition : x>0 + auto div_y_x = make_shared(y, x); + auto atan = make_shared(div_y_x); + auto const_zero = create_same_type_const_scalar(x, 0); + auto result = atan->output(0); + + // handle the second condition : x<0 && y>=0 + auto const_pi = create_same_type_const_scalar(x, std::atan(1.0) * 4); + auto is_x_negative = make_shared(x, const_zero); + auto y_non_negative = make_shared(y, const_zero); + auto cond1 = make_shared(is_x_negative, y_non_negative); + auto atan_y_x_plus_pi = make_shared(atan, const_pi); + result = make_shared(cond1, atan_y_x_plus_pi, result); + + // handle the third condition : x<0 && y<0 + auto is_y_negative = make_shared(y, const_zero); + auto cond2 = make_shared(is_x_negative, is_y_negative); + auto atan_y_x_minus_pi = make_shared(atan, const_pi); + result = make_shared(cond2, atan_y_x_minus_pi, result); + + // handle the fourth condition : x=0 && y>0 + auto is_x_zero = make_shared(x, const_zero); + auto is_y_positive = make_shared(y, const_zero); + auto cond3 = make_shared(is_x_zero, is_y_positive); + auto const_two = create_same_type_const_scalar(x, 2); + auto pi_div_two = make_shared(const_pi, const_two); + result = make_shared(cond3, pi_div_two, result); + + // handle the fifth condition : x=0 && y<0 + auto cond4 = make_shared(is_x_zero, is_y_negative); + auto const_minus_two = create_same_type_const_scalar(x, -2); + auto pi_div_minus_two = make_shared(const_pi, const_minus_two); + result = make_shared(cond4, pi_div_two, result); + NamedOutputs named_outputs; + named_outputs["Out"] = {result}; + return named_outputs; +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git 
a/src/frontends/paddle/src/op/elementwise_ops.cpp b/src/frontends/paddle/src/op/elementwise_ops.cpp index 0708be5a263227..e3096f3f079674 100644 --- a/src/frontends/paddle/src/op/elementwise_ops.cpp +++ b/src/frontends/paddle/src/op/elementwise_ops.cpp @@ -4,6 +4,8 @@ #include "elementwise_ops.hpp" +#include "op_utils.hpp" + namespace ov { namespace frontend { namespace paddle { @@ -72,6 +74,8 @@ NamedOutputs elementwise_floordiv(const NodeContext& node_context) { if (pd_version >= 2005000 || pd_version == 0) { python_div = true; } + x = get_tensor_safe(x); + y = get_tensor_safe(y); return node_context.default_single_output_mapping( {std::make_shared(x, y, diff --git a/src/frontends/paddle/src/op/fill_any_like.cpp b/src/frontends/paddle/src/op/fill_any_like.cpp index bcd320e2f23cbc..b3f9377cd81c16 100644 --- a/src/frontends/paddle/src/op/fill_any_like.cpp +++ b/src/frontends/paddle/src/op/fill_any_like.cpp @@ -3,6 +3,7 @@ // #include "default_opset.hpp" +#include "op_utils.hpp" #include "openvino/frontend/paddle/node_context.hpp" namespace ov { @@ -10,7 +11,7 @@ namespace frontend { namespace paddle { namespace op { NamedOutputs fill_any_like(const NodeContext& node) { - const auto x = node.get_input("X"); + auto x = node.get_input("X"); auto dtype = node.get_attribute("dtype", element::undefined); const auto value = node.get_attribute("value"); if (dtype == element::undefined) { @@ -25,8 +26,8 @@ NamedOutputs fill_any_like(const NodeContext& node) { }); PADDLE_OP_CHECK(node, valid_type, "Invalid dtype! 
Fill_any_like supports boolean, i16, i32, i64, f16, f32, f64"); const auto value_node = default_opset::Constant::create(dtype, {1}, {value}); + x = get_tensor_safe(x); const auto shape_node = std::make_shared(x); - return node.default_single_output_mapping({std::make_shared(value_node, shape_node)}, {"Out"}); } diff --git a/src/frontends/paddle/src/op/fill_constant.cpp b/src/frontends/paddle/src/op/fill_constant.cpp index 4a674b61d10c86..5df8947d24f62c 100644 --- a/src/frontends/paddle/src/op/fill_constant.cpp +++ b/src/frontends/paddle/src/op/fill_constant.cpp @@ -16,25 +16,30 @@ NamedOutputs fill_constant(const NodeContext& node) { Output shape_node; if (node.has_input("ValueTensor")) { value_node = node.get_input("ValueTensor"); + } else if (dtype == element::boolean) { + bool value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {}, {value}); } else if (dtype == element::i32) { int32_t value = static_cast(node.get_attribute("value")); - value_node = opset6::Constant::create(dtype, {1}, {value}); + value_node = opset6::Constant::create(dtype, {}, {value}); + } else if (dtype == element::f16) { + float value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {}, {value}); } else if (dtype == element::f32) { float value = node.get_attribute("value"); - value_node = opset6::Constant::create(dtype, {1}, {value}); + value_node = opset6::Constant::create(dtype, {}, {value}); + } else if (dtype == element::f64) { + float value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {}, {value}); } else if (dtype == element::i64) { int64_t value = static_cast(node.get_attribute("value")); - value_node = opset6::Constant::create(dtype, {1}, {value}); + value_node = opset6::Constant::create(dtype, {}, {value}); } else { PADDLE_OP_CHECK(node, false, "fill_constant only supports i32, f32, i64"); } - if (shape.empty()) { - shape.emplace_back(1); - } - 
PADDLE_OP_CHECK(node, - shape.size() > 0 || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"), + node.has_attribute("shape") || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"), "fill_constant shape not set"); if (node.has_input("ShapeTensor")) { @@ -50,7 +55,13 @@ NamedOutputs fill_constant(const NodeContext& node) { } shape_node = Output{std::make_shared(shape_tensor_list, 0)}; } else { - shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape); + if (shape.empty()) { + NamedOutputs named_outputs; + named_outputs["Out"] = {value_node}; + return named_outputs; + } else { + shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape); + } } return node.default_single_output_mapping({std::make_shared(value_node, shape_node)}, diff --git a/src/frontends/paddle/src/op/reduce_any.cpp b/src/frontends/paddle/src/op/reduce_any.cpp new file mode 100644 index 00000000000000..45d543534684ce --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_any.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs reduce_any(const NodeContext& node_context) { + return reduce_ops(node_context); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/scatter.cpp b/src/frontends/paddle/src/op/scatter.cpp new file mode 100644 index 00000000000000..23c9aa6b29b90f --- /dev/null +++ b/src/frontends/paddle/src/op/scatter.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/opsets/opset15.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs scatter(const NodeContext& node) { + auto x 
= node.get_input("X"); + auto ids = node.get_input("Ids"); + auto updates = node.get_input("Updates"); + bool overwrite = node.get_attribute("overwrite"); + ov::NodeVector node_vec; + if (ids.get_shape().size() == 0) { + ids = std::make_shared(ids, + default_opset::Constant::create(ov::element::i64, {1}, {0})); + } + + node_vec.push_back(default_opset::Constant::create(ov::element::i64, {1}, {ids.get_shape()[0]})); + node_vec.push_back(default_opset::Constant::create(ov::element::i64, {1}, {1})); + auto shape_node = std::make_shared(node_vec, 0); + auto new_ids = std::make_shared(ids, shape_node, true); + if (overwrite) { + return node.default_single_output_mapping({std::make_shared(x, new_ids, updates)}, + {"Out"}); + } else { + auto x_dtype = x.get_element_type(); + const auto value_node = default_opset::Constant::create(x_dtype, {1}, {0}); + const auto shape_node = std::make_shared(x); + const auto zero_node = std::make_shared(value_node, shape_node); + return node.default_single_output_mapping( + {std::make_shared(zero_node, + new_ids, + updates, + ov::opset15::ScatterNDUpdate::Reduction::SUM)}, + {"Out"}); + } +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op/scatter_nd_add.cpp b/src/frontends/paddle/src/op/scatter_nd_add.cpp new file mode 100644 index 00000000000000..7ef842e5a96e8d --- /dev/null +++ b/src/frontends/paddle/src/op/scatter_nd_add.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/opsets/opset15.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs scatter_nd_add(const NodeContext& node) { + auto x = node.get_input("X"); + auto index = node.get_input("Index"); + auto updates = node.get_input("Updates"); + return node.default_single_output_mapping( + 
{std::make_shared(x, + index, + updates, + ov::opset15::ScatterNDUpdate::Reduction::SUM)}, + {"Out"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/set_value.cpp b/src/frontends/paddle/src/op/set_value.cpp index cd6559114519ce..c709ae2bea6695 100644 --- a/src/frontends/paddle/src/op/set_value.cpp +++ b/src/frontends/paddle/src/op/set_value.cpp @@ -5,26 +5,15 @@ #include #include "default_opset.hpp" +#include "op_utils.hpp" #include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/op/util/attr_types.hpp" namespace ov { namespace frontend { namespace paddle { namespace op { -std::shared_ptr get_tensor_list(const OutputVector& node) { - auto tensor_list = node; - for (size_t i = 0; i < node.size(); i++) { - if (node[i].get_shape().size() == 0) { - tensor_list[i] = - std::make_shared(node[i], - default_opset::Constant::create(element::i64, {1}, {0})); - } - } - const auto new_node = std::make_shared(tensor_list, 0); - return new_node; -} - std::shared_ptr handle_minus_index(const std::vector& node, const Output& dim) { const auto new_node = default_opset::Constant::create(element::i64, {node.size()}, node); return new_node; @@ -36,28 +25,75 @@ std::shared_ptr handle_maximum_index(Output& node, const Output(mask, update_node, node); } -bool is_contain_minus(const std::vector vec) { - for (int64_t i : vec) { - if (i < 0) - return true; +void normalize(std::vector& vec, const Output input, const std::vector axes_vec) { + for (size_t i = 0; i < axes_vec.size(); i++) { + if (vec[i] < 0) { + auto x_dim = std::stoll(input.get_partial_shape()[axes_vec[i]].to_string()); + vec[i] = vec[i] + x_dim; + } } - return false; } NamedOutputs set_value(const NodeContext& node) { auto input_node = node.get_input("Input"); - auto value_node = node.get_input("ValueTensor"); PADDLE_OP_CHECK(node, (input_node.get_partial_shape().rank().is_static()), "rank must be static"); const auto dims = 
static_cast(input_node.get_partial_shape().rank().get_length()); - const auto axes = node.get_attribute>("axes"); + auto axes = node.get_attribute>("axes"); + auto decrease_axes = node.get_attribute>("decrease_axes"); // const auto input_shape_ = input_node.get_partial_shape().get_shape(); // auto input_shape = default_opset::Constant::create(element::i64, {input_shape_.size()}, input_shape_); - auto input_shape = std::make_shared(input_node); - Output starts_node, ends_node, steps_node, starts, ends, steps; + Output input_shape = std::make_shared(input_node); + Output value_node, axes_node, spec_dim_node, starts_node, ends_node, steps_node, starts, ends, steps; + if (node.has_input("ValueTensor")) { + value_node = node.get_input("ValueTensor"); + } else { + auto value_shape = node.get_attribute>("shape"); + auto input_type = node.get_attribute("dtype"); + + if (input_type == ov::element::i32) { + if (node.has_attribute("int32_values")) { + auto value_arrt = node.get_attribute>("int32_values"); + value_node = {default_opset::Constant::create(input_type, + Shape{value_shape.begin(), value_shape.end()}, + value_arrt)}; + } else { + auto value_arrt = node.get_attribute>("values"); + auto int32_value = std::vector(value_arrt.begin(), value_arrt.end()); + value_node = {default_opset::Constant::create(input_type, + Shape{value_shape.begin(), value_shape.end()}, + int32_value)}; + } + } else if (input_type == ov::element::i64) { + auto value_arrt = node.has_attribute("values") ? 
node.get_attribute>("values") + : node.get_attribute>("int64_values"); + value_node = { + default_opset::Constant::create(input_type, Shape{value_shape.begin(), value_shape.end()}, value_arrt)}; + } else if (input_type == ov::element::f32) { + if (node.has_attribute("fp32_values")) { + auto value_arrt = node.get_attribute>("fp32_values"); + value_node = {default_opset::Constant::create(input_type, + Shape{value_shape.begin(), value_shape.end()}, + value_arrt)}; + } else { + auto value_arrt = node.get_attribute>("values"); + auto fp32_value = std::vector(value_arrt.begin(), value_arrt.end()); + value_node = {default_opset::Constant::create(input_type, + Shape{value_shape.begin(), value_shape.end()}, + fp32_value)}; + } + } else if (input_type == ov::element::f64) { + auto value_arrt = node.has_attribute("values") ? node.get_attribute>("values") + : node.get_attribute>("fp64_values"); + value_node = { + default_opset::Constant::create(input_type, Shape{value_shape.begin(), value_shape.end()}, value_arrt)}; + } else { + PADDLE_OP_CHECK(node, false, "assign_value only supports int32, int64, float32, float64"); + } + } // The following process is: // Given: // input_data: shape(5, 6, 7, 8, 9) @@ -93,44 +129,59 @@ NamedOutputs set_value(const NodeContext& node) { // 7. Use `ScatterUpdate` update update_value into input_data. // 8. Reshape input to original input_shape. 
- const auto axes_node = default_opset::Constant::create(element::i64, {axes.size(), 1}, axes); - const auto spec_dim_node = std::make_shared(input_shape, axes_node); const auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); const auto one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); const auto dim_node = default_opset::Constant::create(element::i64, Shape{}, {dims}); const auto reshape_flatten = default_opset::Constant::create(ov::element::i64, {1}, {-1}); const auto slice_shape = default_opset::Constant::create(ov::element::i64, {1, 1}, {-1}); + if (axes.size() > 1) { + OutputVector spec_dim_vec; + for (const auto& axis : axes) { + auto spec_dim_node_tmp = std::make_shared( + input_shape, + default_opset::Constant::create(element::i64, {1}, {axis}), + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0})); + spec_dim_vec.emplace_back(spec_dim_node_tmp); + } + axes_node = default_opset::Constant::create(element::i64, {axes.size()}, axes); + axes_node = std::make_shared(axes_node, one_node); + spec_dim_node = std::make_shared(spec_dim_vec, 0); + } else { + axes_node = default_opset::Constant::create(element::i64, {1}, axes); + spec_dim_node = + std::make_shared(input_shape, + axes_node, + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0})); + axes_node = std::make_shared(axes_node, one_node); + } // get positive starts ends and steps if (node.has_input("StartsTensorList")) { - starts = get_tensor_list(node.get_ng_inputs("StartsTensorList")); + auto starts_list = node.get_ng_inputs("StartsTensorList"); + starts = get_tensor_list(starts_list); } else if (node.has_attribute("starts")) { auto start_vec = node.get_attribute>("starts"); - if (is_contain_minus(start_vec)) { - PADDLE_OP_CHECK(node, (false), "Currently not support minus start!"); - } + normalize(start_vec, input_node, axes); starts = handle_minus_index(start_vec, spec_dim_node); } else PADDLE_OP_CHECK(node, (false), "Invalid 
arguments!"); if (node.has_input("EndsTensorList")) { - ends = get_tensor_list(node.get_ng_inputs("EndsTensorList")); + auto ends_list = node.get_ng_inputs("EndsTensorList"); + ends = get_tensor_list(ends_list); } else if (node.has_attribute("ends")) { auto ends_vec = node.get_attribute>("ends"); - if (is_contain_minus(ends_vec)) { - PADDLE_OP_CHECK(node, (false), "Currently not support minus ends!"); - } + normalize(ends_vec, input_node, axes); ends = handle_minus_index(ends_vec, spec_dim_node); } else PADDLE_OP_CHECK(node, (false), "Invalid arguments!"); if (node.has_input("StepsTensorList")) { - steps = get_tensor_list(node.get_ng_inputs("StepsTensorList")); + auto steps_list = node.get_ng_inputs("StepsTensorList"); + steps = get_tensor_list(steps_list); } else if (node.has_attribute("steps")) { auto step_vec = node.get_attribute>("steps"); - if (is_contain_minus(step_vec)) { - PADDLE_OP_CHECK(node, (false), "Currently not support minus steps!"); - } + normalize(step_vec, input_node, axes); steps = handle_minus_index(step_vec, spec_dim_node); } else PADDLE_OP_CHECK(node, (false), "Invalid arguments!"); @@ -164,16 +215,22 @@ NamedOutputs set_value(const NodeContext& node) { value_shape_update_node = std::make_shared(value_shape_update_node); value_shape_update_node = std::make_shared(value_shape_update_node, element::i64); // 4.4 update - const auto value_target_shape = + Output value_target_shape = std::make_shared(input_shape, axes_node, value_shape_update_node); // 4.5 broadcast + const auto value_dims = static_cast(value_node.get_partial_shape().rank().get_length()); + if (value_dims != dims && decrease_axes.size() > 0) { + value_node = std::make_shared( + value_node, + default_opset::Constant::create(element::i64, {decrease_axes.size()}, decrease_axes)); + } auto value_shape = std::make_shared(value_node); auto value_rank = std::make_shared(value_shape); auto value_rank_scalar = std::make_shared(value_rank); Output broadcast_axes = 
std::make_shared(zero_node, value_rank_scalar, one_node, element::i64); - value_node = std::make_shared(value_node, value_target_shape, broadcast_axes); + value_node = std::make_shared(value_node, value_target_shape); // get total number of elements const auto numel_node = std::make_shared(input_shape, zero_node); diff --git a/src/frontends/paddle/src/op/slice_ops.hpp b/src/frontends/paddle/src/op/slice_ops.hpp index 7035d11d7769d3..bb0351ed81e69c 100644 --- a/src/frontends/paddle/src/op/slice_ops.hpp +++ b/src/frontends/paddle/src/op/slice_ops.hpp @@ -4,6 +4,7 @@ #include #include "default_opset.hpp" +#include "op_utils.hpp" #include "openvino/frontend/paddle/node_context.hpp" namespace ov { @@ -19,8 +20,7 @@ Output idx_node(const std::string& tensor_alias, return std::make_shared(node.get_input(tensor_alias), element::i32); } else if (node.has_input(list_alias)) { auto inputs = node.get_ng_inputs(list_alias); - return std::make_shared(std::make_shared(inputs, 0), - element::i32); + return std::make_shared(get_tensor_list(inputs), element::i32); } else { auto values = node.get_attribute>(attr_alias); return default_opset::Constant::create(element::i32, {values.size()}, values); diff --git a/src/frontends/paddle/src/op/take_along_axis.cpp b/src/frontends/paddle/src/op/take_along_axis.cpp new file mode 100644 index 00000000000000..81ca472dfa401c --- /dev/null +++ b/src/frontends/paddle/src/op/take_along_axis.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/opsets/opset6.hpp" +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs take_along_axis(const NodeContext& node) { + auto input = node.get_input("Input"); + auto index = node.get_input("Index"); + auto axis = node.get_attribute("Axis"); + return node.default_single_output_mapping({std::make_shared(input, index, 
axis)}, + {"Result"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/tile.cpp b/src/frontends/paddle/src/op/tile.cpp index 7e8938414a70ad..e611ec8351779a 100644 --- a/src/frontends/paddle/src/op/tile.cpp +++ b/src/frontends/paddle/src/op/tile.cpp @@ -3,6 +3,7 @@ // #include "default_opset.hpp" +#include "op_utils.hpp" #include "openvino/frontend/paddle/node_context.hpp" namespace ov { @@ -11,12 +12,13 @@ namespace paddle { namespace op { NamedOutputs tile(const NodeContext& node) { auto x = node.get_input("X"); + x = get_tensor_safe(x); Output repeats; if (node.has_input("RepeatTimes")) { repeats = node.get_input("RepeatTimes"); } else if (node.has_input("repeat_times_tensor")) { auto repeats_list = node.get_ng_inputs("repeat_times_tensor"); - repeats = std::make_shared(repeats_list, 0); + repeats = get_tensor_list(repeats_list); } else { std::vector repeats_vector = node.get_attribute>("repeat_times", {}); repeats = default_opset::Constant::create(ov::element::i32, Shape{repeats_vector.size()}, repeats_vector); diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index ba422253c2b7b1..deccc55fb96163 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -9,6 +9,7 @@ namespace paddle { namespace op { #define OP_CONVERTER(op) NamedOutputs op(const NodeContext& node) OP_CONVERTER(argmax); +OP_CONVERTER(argmin); OP_CONVERTER(assign); OP_CONVERTER(assign_value); OP_CONVERTER(batch_norm); @@ -142,9 +143,17 @@ OP_CONVERTER(write_to_array); OP_CONVERTER(where_index); OP_CONVERTER(yolo_box); OP_CONVERTER(generate_proposals_v2); +OP_CONVERTER(abs); +OP_CONVERTER(elu); +OP_CONVERTER(atan2); +OP_CONVERTER(scatter); +OP_CONVERTER(scatter_nd_add); +OP_CONVERTER(take_along_axis); +OP_CONVERTER(reduce_any); } // namespace op std::map get_supported_ops() { return {{"arg_max", op::argmax}, + {"arg_min", op::argmin}, 
{"assign", op::assign}, {"assign_value", op::assign_value}, {"batch_norm", op::batch_norm}, @@ -285,7 +294,14 @@ std::map get_supported_ops() { {"while", op::while_}, {"write_to_array", op::write_to_array}, {"where_index", op::where_index}, - {"yolo_box", op::yolo_box}}; + {"yolo_box", op::yolo_box}, + {"abs", op::abs}, + {"elu", op::elu}, + {"atan2", op::atan2}, + {"scatter", op::scatter}, + {"scatter_nd_add", op::scatter_nd_add}, + {"take_along_axis", op::take_along_axis}, + {"reduce_any", op::reduce_any}}; }; } // namespace paddle diff --git a/src/frontends/paddle/src/op_utils.cpp b/src/frontends/paddle/src/op_utils.cpp new file mode 100644 index 00000000000000..6bb8084a076450 --- /dev/null +++ b/src/frontends/paddle/src/op_utils.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +using namespace ov::frontend::paddle::op::default_opset; +using namespace ov; +using namespace ov::frontend; + +namespace ov { +namespace frontend { +namespace paddle { +Output get_tensor_list(const OutputVector& node) { + auto tensor_list = node; + for (size_t i = 0; i < tensor_list.size(); i++) { + if (tensor_list[i].get_partial_shape().rank().get_length() == 0) { + tensor_list[i] = std::make_shared( + tensor_list[i], + op::default_opset::Constant::create(element::i64, {1}, {0})); + } + } + Output res; + if (node.size() == 1) { + res = tensor_list[0]; + } else { + res = std::make_shared(tensor_list, 0); + } + return res; +} + +Output get_tensor_safe(const Output& node) { + auto node_dim = node.get_partial_shape().rank().get_length(); + if (node_dim == 0) { + return std::make_shared( + node, + op::default_opset::Constant::create(element::i32, {1}, {0})); + } else { + return node; + } +} + +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op_utils.hpp 
b/src/frontends/paddle/src/op_utils.hpp new file mode 100644 index 00000000000000..69c0a67a1874ff --- /dev/null +++ b/src/frontends/paddle/src/op_utils.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { + +Output get_tensor_list(const OutputVector& node); +Output get_tensor_safe(const Output& node); +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp index 39d20947219e6e..d9b22dfdd397e6 100644 --- a/src/frontends/paddle/tests/op_fuzzy.cpp +++ b/src/frontends/paddle/tests/op_fuzzy.cpp @@ -669,6 +669,16 @@ static const std::vector models{ std::string("where_index_4/where_index_4.pdmodel"), std::string("where_index_5/where_index_5.pdmodel"), std::string("where_index_6/where_index_6.pdmodel"), + std::string("abs_float32/abs_float32.pdmodel"), + std::string("atan2/atan2.pdmodel"), + std::string("reduce_any_test_0/reduce_any_test_0.pdmodel"), + std::string("reduce_any_test_1/reduce_any_test_1.pdmodel"), + std::string("reduce_any_test_2/reduce_any_test_2.pdmodel"), + std::string("reduce_any_test_3/reduce_any_test_3.pdmodel"), + std::string("reduce_any_test_4/reduce_any_test_4.pdmodel"), + std::string("scatter_test_1/scatter_test_1.pdmodel"), + std::string("scatter_test_2/scatter_test_2.pdmodel"), + std::string("scatter_nd_add_test_1/scatter_nd_add_test_1.pdmodel"), // Temporily disable them until root caused to secure CI stable. // CVS-66703 to track this. 
// std::string("yolo_box_clip_box/yolo_box_clip_box.pdmodel"), diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_abs.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_abs.py new file mode 100644 index 00000000000000..86d9459c8380b0 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_abs.py @@ -0,0 +1,40 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# abs paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def abs(name : str, x): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + if paddle.__version__ >= '2.0.0': + abs_node = paddle.abs(node_x, name='abs_node') + else: + abs_node = paddle.fluid.layers.abs(node_x, name='abs_node') + out = paddle.assign(abs_node) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feed_vars=[node_x], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + input_x = np.array([-0.4, -0.2, 0.1, 0.3]).astype(np.float32) + abs("abs_float32", input_x) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_atan2.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_atan2.py new file mode 100644 index 00000000000000..e8a15ac7bd262c --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_atan2.py @@ -0,0 +1,40 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# atan2 paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def atan2(name , x , y): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=y.dtype) + atan2_node = paddle.atan2(node_x,node_y, name='atan2_node') + out = paddle.assign(atan2_node) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feed_vars=[node_x, node_y], fetchlist=[out], inputs=[x,y], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + if paddle.__version__ >= '2.0.0': + input_x = np.array([-1, 1, 1, -1]).astype(np.float32) + input_y = np.array([-1, -1, 1, 1]).astype(np.float32) + atan2("atan2",input_x,input_y) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_any.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_any.py new file mode 100644 index 00000000000000..b527dfd39aa658 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_any.py @@ -0,0 +1,46 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# reduce_any paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def reduce_any(name : str, x, axis=None, keepdim=False): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + if paddle.__version__ >= '2.0.0': + cast_node=paddle.cast(node_x, dtype="bool") + any_out = paddle.any(cast_node, axis=axis, keepdim=keepdim) + out = paddle.cast(any_out, x.dtype) + else: + cast_node=paddle.fluid.layers.cast(node_x, "bool") + any_out = paddle.fluid.layers.reduce_any(cast_node, axis=axis, keepdim=keepdim) + out = paddle.cast(any_out, x.dtype) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feed_vars=[node_x], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + data = np.array([[1,0], [1, 1]]).astype(np.float32) + reduce_any("reduce_any_test_0", data) + reduce_any("reduce_any_test_1", data, axis=0, keepdim=False) + reduce_any("reduce_any_test_2", data, axis=-1, keepdim=False) + reduce_any("reduce_any_test_3", data, axis=1, keepdim=True) + reduce_any("reduce_any_test_4", data, axis=[0,1], keepdim=True) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_scatter.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_scatter.py new file mode 100644 index 00000000000000..46ae4bb0843a04 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_scatter.py @@ -0,0 +1,43 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# scatter paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def scatter(name : str, x,index,updates,overwrite=True): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_index = paddle.static.data(name='index', shape=index.shape, dtype=index.dtype) + node_updates=paddle.static.data(name='updates', shape=updates.shape, dtype=updates.dtype) + out=paddle.scatter(node_x, node_index, node_updates, overwrite) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x,'index':index,'updates':updates}, + fetch_list=[out]) + saveModel(name, exe, feed_vars=[node_x,node_index,node_updates], fetchlist=[out], inputs=[x,index,updates], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) + index = np.array([2, 1, 0, 1]).astype(np.int64) + updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) + + scatter("scatter_test_1", x,index,updates,overwrite=True) + scatter("scatter_test_2", x,index,updates,overwrite=False) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_scatter_nd_add.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_scatter_nd_add.py new file mode 100644 index 00000000000000..c654fd8dd91ad5 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_scatter_nd_add.py @@ -0,0 +1,41 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# scatter_nd_add paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def scatter_nd_add(name : str, x,index,updates): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_index = paddle.static.data(name='index', shape=index.shape, dtype=index.dtype) + node_updates=paddle.static.data(name='updates', shape=updates.shape, dtype=updates.dtype) + out=paddle.scatter_nd_add(node_x, node_index, node_updates) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x,'index':index,'updates':updates}, + fetch_list=[out]) + saveModel(name, exe, feed_vars=[node_x,node_index,node_updates], fetchlist=[out], inputs=[x,index,updates], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x=np.random.rand(3, 5, 9, 10).astype(np.float32) + index = np.array([[1, 1],[0, 1],[1, 3]]).astype(np.int64) + updates = np.random.rand(3, 9, 10).astype(np.float32) + scatter_nd_add("scatter_nd_add_test_1", x,index,updates) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_take_along_axis.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_take_along_axis.py new file mode 100644 index 00000000000000..cd9777b2681697 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_take_along_axis.py @@ -0,0 +1,39 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# take_along_axis paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def take_along_axis(name : str, x,index,axis): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_index = paddle.static.data(name='index', shape=index.shape, dtype=index.dtype) + out=paddle.take_along_axis(node_x, node_index, axis) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + outs = exe.run( + feed={'x': x,'index':index}, + fetch_list=[out]) + saveModel(name, exe, feed_vars=[node_x,node_index], fetchlist=[out], inputs=[x,index], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x = np.array([[1, 2, 3], [4, 5, 6], [7,8,9]]).astype(np.float32) + index = np.array([[0]]).astype(np.int64) + axis=0 + take_along_axis("take_along_axis_test_1", x,index,axis) + + + +if __name__ == "__main__": + main() \ No newline at end of file