diff --git a/src/frontends/paddle/src/default_opset.hpp b/src/frontends/paddle/src/default_opset.hpp index c3eed5b5653c92..a5dc374964d485 100644 --- a/src/frontends/paddle/src/default_opset.hpp +++ b/src/frontends/paddle/src/default_opset.hpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "openvino/opsets/opset9.hpp" +#include "openvino/opsets/opset14.hpp" namespace ov { namespace frontend { namespace paddle { namespace op { -namespace default_opset = ov::opset9; +namespace default_opset = ov::opset14; } // namespace op } // namespace paddle diff --git a/src/frontends/paddle/src/op/elu.cpp b/src/frontends/paddle/src/op/elu.cpp new file mode 100644 index 00000000000000..c51a2af6f9f176 --- /dev/null +++ b/src/frontends/paddle/src/op/elu.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/frontend/paddle/visibility.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs elu(const NodeContext& node) { + auto data = node.get_input("X"); + auto alpha = node.get_attribute("alpha", 1.0); + const auto& elu_node = std::make_shared(data, alpha); + return node.default_single_output_mapping({elu_node}, {"Out"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/expand_v2.cpp b/src/frontends/paddle/src/op/expand_v2.cpp index d79e49db286c13..ea174efa3a9920 100644 --- a/src/frontends/paddle/src/op/expand_v2.cpp +++ b/src/frontends/paddle/src/op/expand_v2.cpp @@ -19,8 +19,16 @@ NamedOutputs expand_v2(const NodeContext& node) { auto inputs = node.get_ng_inputs("expand_shapes_tensor"); ov::NodeVector node_vec; for (auto& input : inputs) { + if (input.get_partial_shape().rank().get_length() == 0) { + // should unsqueeze the input with non-shape. 
+ auto unsqueeze_scalar = default_opset::Constant::create(ov::element::i32, {}, {0}); + input = std::make_shared(input, unsqueeze_scalar); + } + PADDLE_OP_CHECK(node, + input.get_partial_shape().rank().get_length() == 1, + "the rank of expand_v2 shape tensor input must == 1"); auto cast = std::make_shared(input, element::i32); - node_vec.push_back(cast); + node_vec.emplace_back(cast); } shape_expected_node = std::make_shared(node_vec, 0); } else { diff --git a/src/frontends/paddle/src/op/eye.cpp b/src/frontends/paddle/src/op/eye.cpp new file mode 100644 index 00000000000000..3734d6fab44817 --- /dev/null +++ b/src/frontends/paddle/src/op/eye.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs eye(const NodeContext& node) { + auto row = node.get_attribute("num_rows"); + auto col = node.get_attribute("num_columns", row); + auto dtype = node.get_attribute("dtype", ov::element::f32); + + const auto& row_node = std::make_shared(ov::element::i64, Shape{}, (row)); + const auto& col_node = std::make_shared(ov::element::i64, Shape{}, (col)); + const auto& diagonal_index_node = std::make_shared(ov::element::i32, Shape{}, (0)); + + std::shared_ptr out_node; + if (dtype == ov::element::i32 || dtype == ov::element::i64) { + out_node = std::make_shared(row_node, col_node, diagonal_index_node, dtype); + } else { + const auto& eye_node = + std::make_shared(row_node, col_node, diagonal_index_node, ov::element::i32); + out_node = std::make_shared(eye_node, dtype); + } + + return node.default_single_output_mapping({out_node}, {"Out"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/fill_constant.cpp b/src/frontends/paddle/src/op/fill_constant.cpp index b066fdfbe7a0c7..4a674b61d10c86 
100644 --- a/src/frontends/paddle/src/op/fill_constant.cpp +++ b/src/frontends/paddle/src/op/fill_constant.cpp @@ -29,6 +29,10 @@ NamedOutputs fill_constant(const NodeContext& node) { PADDLE_OP_CHECK(node, false, "fill_constant only supports i32, f32, i64"); } + if (shape.empty()) { + shape.emplace_back(1); + } + PADDLE_OP_CHECK(node, shape.size() > 0 || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"), "fill_constant shape not set"); diff --git a/src/frontends/paddle/src/op/interp.cpp b/src/frontends/paddle/src/op/interp.cpp index e7b317f2888a83..5ab551dc3bdde2 100644 --- a/src/frontends/paddle/src/op/interp.cpp +++ b/src/frontends/paddle/src/op/interp.cpp @@ -4,6 +4,7 @@ #include "default_opset.hpp" #include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/opsets/opset4.hpp" namespace ov { namespace frontend { @@ -147,8 +148,9 @@ static NamedOutputs interpolate(const NodeContext& node, attrs.pads_begin = {0, 0, 0, 0}; attrs.pads_end = {0, 0, 0, 0}; - return node.default_single_output_mapping({std::make_shared(x, target_spatial_shape, scales, attrs)}, - {"Out"}); + return node.default_single_output_mapping( + {std::make_shared(x, target_spatial_shape, scales, attrs)}, + {"Out"}); } NamedOutputs linear_interp_v2(const NodeContext& node) { diff --git a/src/frontends/paddle/src/op/reduce_ops.hpp b/src/frontends/paddle/src/op/reduce_ops.hpp index 2b595160420282..954d1de425c924 100644 --- a/src/frontends/paddle/src/op/reduce_ops.hpp +++ b/src/frontends/paddle/src/op/reduce_ops.hpp @@ -31,6 +31,10 @@ NamedOutputs reduce_ops(const NodeContext& node) { dims = node.get_attribute>("dim"); } + std::transform(dims.begin(), dims.end(), dims.begin(), [&input_rank](int64_t value) { + return value >= 0 ? 
value : value + input_rank; + }); + int64_t axis_size = static_cast(dims.size()); reduce_all = reduce_all || (axis_size == input_rank || axis_size == 0); diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index 769492eb13d1b8..e092f16095abe0 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -39,9 +39,11 @@ OP_CONVERTER(elementwise_sub); OP_CONVERTER(equal); OP_CONVERTER(greater_equal); OP_CONVERTER(not_equal); +OP_CONVERTER(elu); OP_CONVERTER(embedding); OP_CONVERTER(exp); OP_CONVERTER(expand_v2); +OP_CONVERTER(eye); OP_CONVERTER(flip); OP_CONVERTER(flatten_contiguous_range); OP_CONVERTER(floor); @@ -173,9 +175,11 @@ std::map get_supported_ops() { {"elementwise_sub", op::elementwise_sub}, {"dropout", op::dropout}, {"elementwise_pow", op::elementwise_pow}, + {"elu", op::elu}, {"equal", op::equal}, {"exp", op::exp}, {"expand_v2", op::expand_v2}, + {"eye", op::eye}, {"fill_any_like", op::fill_any_like}, {"fill_constant", op::fill_constant}, {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp index 99357a3a336d01..53ea7852604376 100644 --- a/src/frontends/paddle/tests/op_fuzzy.cpp +++ b/src/frontends/paddle/tests/op_fuzzy.cpp @@ -188,6 +188,7 @@ static const std::vector models{ std::string("elementwise_floordiv_int64_2/elementwise_floordiv_int64_2.pdmodel"), std::string("elementwise_floordiv_int64_3/elementwise_floordiv_int64_3.pdmodel"), std::string("elementwise_mul_bool1/elementwise_mul_bool1.pdmodel"), + std::string("elu/elu.pdmodel"), std::string("embedding_0/embedding_0.pdmodel"), std::string("embedding_sparse/embedding_sparse.pdmodel"), std::string("embedding_none_weight/embedding_none_weight.pdmodel"), @@ -201,6 +202,9 @@ static const std::vector models{ std::string("expand_v2_tensor_list/expand_v2_tensor_list.pdmodel"), 
std::string("expand_v2_tensor_list2/expand_v2_tensor_list2.pdmodel"), std::string("exp_test_float32/exp_test_float32.pdmodel"), + std::string("eye/eye.pdmodel"), + std::string("eye_int32/eye_int32.pdmodel"), + std::string("eye_int64/eye_int64.pdmodel"), std::string("flip_1/flip_1.pdmodel"), std::string("flip_2/flip_2.pdmodel"), std::string("flip_3/flip_3.pdmodel"), diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_elu.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_elu.py new file mode 100644 index 00000000000000..4dc67b2051222b --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_elu.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# elu paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def elu(name: str, x, alpha=None, data_type='float32'): +     paddle.enable_static() + +    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): +        node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + +        if paddle.__version__ >= '2.0.0': +            out = paddle.nn.functional.elu(node_x, alpha, name='elu') +        else: +            out = paddle.fluid.layers.elu(node_x, alpha, name='elu') +        cpu = paddle.static.cpu_places(1) +        exe = paddle.static.Executor(cpu[0]) +        # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feed_vars=[node_x], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_type = 'float32' + data = np.random.randn(2, 3, 4).astype('float32') + elu("elu", data) + +if __name__ == "__main__": + main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_eye.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_eye.py new file mode 100644 index 00000000000000..9b1a4f668c3ab2 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_eye.py @@ -0,0 +1,41 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# eye paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def eye(name : str, rows, cols = None, dtype = None): +     paddle.enable_static() +    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): +        if paddle.__version__ >= '2.0.0': +            x1 = paddle.eye(num_rows=rows, num_columns=cols, dtype=dtype, name='fill') +            x2 = paddle.eye(num_rows=rows, num_columns=cols, dtype=dtype, name='fill') +        else: +            x1 = paddle.fluid.layers.eye(num_rows=rows, num_columns=cols, dtype=dtype, name='fill_constant') +            x2 = paddle.fluid.layers.eye(num_rows=rows, num_columns=cols, dtype=dtype, name='fill_constant') +        out = paddle.add(x1, x2) +        cpu = paddle.static.cpu_places(1) +        exe = paddle.static.Executor(cpu[0]) +        # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feed_vars=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + eye("eye", 3) + eye("eye_int32", 2, 3, "int32") + eye("eye_int64", 2, 3, "int64") + +if __name__ == "__main__": + main()