[PDPD] Added and fixed Paddle op conversions #28347

Open · wants to merge 3 commits into master
Changes from 1 commit
31 changes: 31 additions & 0 deletions src/frontends/paddle/src/decoder_proto.cpp
@@ -70,6 +70,37 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
return attrs[0].block_idx();
case proto::AttrType::BLOCKS:
return std::vector<std::int32_t>(attrs[0].blocks_idx().begin(), attrs[0].blocks_idx().end());
case proto::AttrType::SCALARS: {
auto scalars_size = attrs[0].scalars_size();
if (scalars_size >= 1) {
if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "LONG") {
std::vector<int64_t> res;
res.reserve(scalars_size);
for (int i = 0; i < scalars_size; ++i) {
res.push_back(attrs[0].scalars(i).i());
}
return res;
} else if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "FLOAT64") {
std::vector<double> res;
res.reserve(scalars_size);
for (int i = 0; i < scalars_size; ++i) {
res.push_back(attrs[0].scalars(i).r());
}
return res;
} else if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "BOOLEAN") {
std::vector<bool> res;
res.reserve(scalars_size);
for (int i = 0; i < scalars_size; ++i) {
res.push_back(attrs[0].scalars(i).b());
}
return res;
}
} else {
FRONT_END_GENERAL_CHECK(false,
"Conversion from PaddlePaddle to OpenVINO is not supported 0 dims in SCALARS.");
break;
}
}
default:
FRONT_END_GENERAL_CHECK(false, "Conversion from PaddlePaddle to OpenVINO data type is not supported.");
}
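As a usage illustration (not part of this PR), a converter can consume such a SCALARS-backed attribute through the regular NodeContext API; the sketch below assumes a hypothetical op with an int64 SCALARS attribute named "sections":

```cpp
// Minimal sketch, not from this PR: "some_op" and its "sections" attribute are
// hypothetical. LONG scalars arrive as std::vector<int64_t>, FLOAT64 as
// std::vector<double>, and BOOLEAN as std::vector<bool> (see the switch above).
NamedOutputs some_op(const NodeContext& node) {
    auto data = node.get_input("X");
    auto sections = node.get_attribute<std::vector<int64_t>>("sections");
    auto sections_node =
        ov::opset6::Constant::create(ov::element::i64, {sections.size()}, sections);
    // Placeholder subgraph that simply reshapes X by the scalar list to show the wiring.
    auto reshaped = std::make_shared<ov::opset6::Reshape>(data, sections_node, true);
    return node.default_single_output_mapping({reshaped}, {"Out"});
}
```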
20 changes: 20 additions & 0 deletions src/frontends/paddle/src/op/abs.cpp
@@ -0,0 +1,20 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/paddle/node_context.hpp"
#include "openvino/opsets/opset6.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs abs(const NodeContext& node) {
auto data = node.get_input("X");
return node.default_single_output_mapping({std::make_shared<ov::opset6::Abs>(data)}, {"Out"});
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
18 changes: 8 additions & 10 deletions src/frontends/paddle/src/op/argmax.cpp
@@ -12,31 +12,29 @@ namespace op {
NamedOutputs argmax(const NodeContext& node) {
auto data = node.get_input("X");
bool flatten = node.get_attribute<bool>("flatten");
const element::Type& index_element_type = element::i64;
const Output<ov::Node> k = ov::opset6::Constant::create(ov::element::i64, {}, {1});
auto dtype = node.get_attribute<ov::element::Type>("dtype");
const Output<ov::Node> k = ov::opset6::Constant::create(dtype, {}, {1});

if (!flatten) {
auto axis = node.get_attribute<int64_t>("axis");
const auto axis_to_remove = ov::opset6::Constant::create(element::u64, Shape{}, {axis});
auto node_topk = std::make_shared<ov::opset6::TopK>(data, k, axis, "max", "index", index_element_type);
auto node_topk = std::make_shared<ov::opset6::TopK>(data, k, axis, "max", "index", dtype);
const auto reshaped_indices = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1), axis_to_remove);
return node.default_single_output_mapping(
{std::make_shared<ov::opset6::Convert>(reshaped_indices, element::i64)},
{"Out"});
return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(reshaped_indices, dtype)},
{"Out"});
} else {
int64_t axis = 0;
const Output<ov::Node> reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1});
auto node_reshape = std::make_shared<ov::opset6::Reshape>(data, reshape_flatten, true);
auto node_topk = std::make_shared<ov::opset6::TopK>(node_reshape, k, axis, "max", "index", index_element_type);
auto node_topk = std::make_shared<ov::opset6::TopK>(node_reshape, k, axis, "max", "index", dtype);
const auto output_info = node.get_output_port_infos("Out");
size_t output_size = output_info[0].second.size();
if (output_size == 0) {
auto out = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1));
return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(out, element::i64)},
{"Out"});
return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(out, dtype)}, {"Out"});
} else {
return node.default_single_output_mapping(
{std::make_shared<ov::opset6::Convert>(node_topk->output(1), element::i64)},
{std::make_shared<ov::opset6::Convert>(node_topk->output(1), dtype)},
{"Out"});
}
}
46 changes: 46 additions & 0 deletions src/frontends/paddle/src/op/argmin.cpp
@@ -0,0 +1,46 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/paddle/node_context.hpp"
#include "openvino/opsets/opset6.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs argmin(const NodeContext& node) {
auto data = node.get_input("X");
bool flatten = node.get_attribute<bool>("flatten");
auto dtype = node.get_attribute<ov::element::Type>("dtype");
const Output<ov::Node> k = ov::opset6::Constant::create(ov::element::i64, {}, {1});

if (!flatten) {
auto axis = node.get_attribute<int64_t>("axis");
const auto axis_to_remove = ov::opset6::Constant::create(element::u64, Shape{}, {axis});
auto node_topk = std::make_shared<ov::opset6::TopK>(data, k, axis, "min", "index", dtype);
const auto reshaped_indices = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1), axis_to_remove);
return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(reshaped_indices, dtype)},
{"Out"});
} else {
int64_t axis = 0;
const Output<ov::Node> reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1});
auto node_reshape = std::make_shared<ov::opset6::Reshape>(data, reshape_flatten, true);
auto node_topk = std::make_shared<ov::opset6::TopK>(node_reshape, k, axis, "min", "index", dtype);
const auto output_info = node.get_output_port_infos("Out");
size_t output_size = output_info[0].second.size();
if (output_size == 0) {
auto out = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1));
return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(out, dtype)}, {"Out"});
} else {
return node.default_single_output_mapping(
{std::make_shared<ov::opset6::Convert>(node_topk->output(1), dtype)},
{"Out"});
}
}
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
31 changes: 26 additions & 5 deletions src/frontends/paddle/src/op/assign_value.cpp
@@ -15,12 +15,29 @@ NamedOutputs assign_value(const NodeContext& node) {

switch (dtype) {
case element::i32: {
auto values = node.get_attribute<std::vector<int32_t>>("int32_values");
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
if (node.has_attribute("int32_values")) {
auto values = node.get_attribute<std::vector<int32_t>>("int32_values");
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
} else {
auto values = node.get_attribute<std::vector<int64_t>>("values");
auto int32_values = std::vector<int32_t>(values.begin(), values.end());
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, int32_values)};
}
break;
}
case element::f32: {
std::vector<float> values = node.get_attribute<std::vector<float>>("fp32_values");
if (node.has_attribute("fp32_values")) {
std::vector<float> values = node.get_attribute<std::vector<float>>("fp32_values");
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
} else {
auto values = node.get_attribute<std::vector<double>>("values");
auto values_f32 = std::vector<float>(values.begin(), values.end());
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values_f32)};
}
break;
}
case element::f64: {
auto values = node.get_attribute<std::vector<double>>("values");
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
break;
}
@@ -30,12 +47,16 @@ NamedOutputs assign_value(const NodeContext& node) {
break;
}
case element::i64: {
auto values = node.get_attribute<std::vector<int64_t>>("int64_values");
auto values = node.has_attribute("int64_values") ? node.get_attribute<std::vector<int64_t>>("int64_values")
: node.get_attribute<std::vector<int64_t>>("values");
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
break;
}
default: {
PADDLE_OP_CHECK(node, false, "assign_value only supports int32, int64, float32, bool");
std::ostringstream oss;
oss << "assign_value only supports int32, int64, float32, float64, bool, but receive dtype["
<< dtype.get_type_name() << "]";
PADDLE_OP_CHECK(node, false, oss.str());
break;
}
}
85 changes: 85 additions & 0 deletions src/frontends/paddle/src/op/atan2.cpp
@@ -0,0 +1,85 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/frontend/paddle/node_context.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/atan.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert_like.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/greater.hpp"
#include "openvino/op/greater_eq.hpp"
#include "openvino/op/less.hpp"
#include "openvino/op/logical_and.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/select.hpp"
#include "openvino/op/subtract.hpp"
#include "openvino/opsets/opset6.hpp"
using namespace std;
using namespace ov::op;

namespace ov {
namespace frontend {
namespace paddle {

template <typename T>
ov::Output<ov::Node> create_same_type_const_scalar(const ov::Output<ov::Node>& same_type_output, const T& value) {
if (same_type_output.get_element_type().is_static()) {
return std::make_shared<ov::op::v0::Constant>(same_type_output.get_element_type(), ov::Shape{}, value);
} else {
ov::Output<ov::Node> const_res =
std::make_shared<ov::op::v0::Constant>(ov::element::from<T>(), ov::Shape{}, value);
const_res = std::make_shared<ov::op::v1::ConvertLike>(const_res, same_type_output);
return const_res;
}
}

namespace op {
NamedOutputs atan2(const NodeContext& node) {
auto y = node.get_input("X1");
auto x = node.get_input("X2");

// handle the first condition : x>0
auto div_y_x = make_shared<v1::Divide>(y, x);
auto atan = make_shared<v0::Atan>(div_y_x);
auto const_zero = create_same_type_const_scalar<int32_t>(x, 0);
auto result = atan->output(0);

// handle the second condition : x<0 && y>=0
auto const_pi = create_same_type_const_scalar<double>(x, std::atan(1.0) * 4);
auto is_x_negative = make_shared<v1::Less>(x, const_zero);
auto y_non_negative = make_shared<v1::GreaterEqual>(y, const_zero);
auto cond1 = make_shared<v1::LogicalAnd>(is_x_negative, y_non_negative);
auto atan_y_x_plus_pi = make_shared<v1::Add>(atan, const_pi);
result = make_shared<v1::Select>(cond1, atan_y_x_plus_pi, result);

// handle the third condition : x<0 && y<0
auto is_y_negative = make_shared<v1::Less>(y, const_zero);
auto cond2 = make_shared<v1::LogicalAnd>(is_x_negative, is_y_negative);
auto atan_y_x_minus_pi = make_shared<v1::Subtract>(atan, const_pi);
result = make_shared<v1::Select>(cond2, atan_y_x_minus_pi, result);

// handle the fourth condition : x=0 && y>0
auto is_x_zero = make_shared<v1::Equal>(x, const_zero);
auto is_y_positive = make_shared<v1::Greater>(y, const_zero);
auto cond3 = make_shared<v1::LogicalAnd>(is_x_zero, is_y_positive);
auto const_two = create_same_type_const_scalar<int32_t>(x, 2);
auto pi_div_two = make_shared<v1::Divide>(const_pi, const_two);
result = make_shared<v1::Select>(cond3, pi_div_two, result);

// handle the fifth condition : x=0 && y<0
auto cond4 = make_shared<v1::LogicalAnd>(is_x_zero, is_y_negative);
auto const_minus_two = create_same_type_const_scalar<int32_t>(x, -2);
auto pi_div_minus_two = make_shared<v1::Divide>(const_pi, const_minus_two);
result = make_shared<v1::Select>(cond4, pi_div_minus_two, result);
NamedOutputs named_outputs;
named_outputs["Out"] = {result};
return named_outputs;
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
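For reference when reviewing the Select chain above, the branches follow the usual piecewise definition of atan2 (the x = y = 0 case is left to the initial atan(y/x) branch):

```latex
\operatorname{atan2}(y, x) =
\begin{cases}
\arctan(y/x)        & x > 0 \\
\arctan(y/x) + \pi  & x < 0,\ y \ge 0 \\
\arctan(y/x) - \pi  & x < 0,\ y < 0 \\
\pi/2               & x = 0,\ y > 0 \\
-\pi/2              & x = 0,\ y < 0
\end{cases}
```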
4 changes: 4 additions & 0 deletions src/frontends/paddle/src/op/elementwise_ops.cpp
@@ -4,6 +4,8 @@

#include "elementwise_ops.hpp"

#include "op_utils.hpp"

namespace ov {
namespace frontend {
namespace paddle {
@@ -72,6 +74,8 @@ NamedOutputs elementwise_floordiv(const NodeContext& node_context) {
if (pd_version >= 2005000 || pd_version == 0) {
python_div = true;
}
x = get_1d_tensor(x);
y = get_1d_tensor(y);
return node_context.default_single_output_mapping(
{std::make_shared<default_opset::Divide>(x,
y,
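op_utils.hpp itself is not shown in this diff, so the following is only an assumed sketch of what get_1d_tensor might do (promote a 0-D scalar input to a 1-D tensor of length 1 before it is fed into Divide or ShapeOf); the actual header should be checked:

```cpp
// Assumed behaviour only — the real get_1d_tensor is declared in op_utils.hpp,
// which is not part of this hunk.
ov::Output<ov::Node> get_1d_tensor(const ov::Output<ov::Node>& node) {
    const auto& pshape = node.get_partial_shape();
    if (pshape.rank().is_static() && pshape.rank().get_length() == 0) {
        // Unsqueeze a 0-D scalar into a 1-D tensor of length 1.
        auto axis = ov::opset6::Constant::create(ov::element::i64, {}, {0});
        return std::make_shared<ov::opset6::Unsqueeze>(node, axis);
    }
    return node;
}
```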
5 changes: 3 additions & 2 deletions src/frontends/paddle/src/op/fill_any_like.cpp
@@ -3,14 +3,15 @@
//

#include "default_opset.hpp"
#include "op_utils.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs fill_any_like(const NodeContext& node) {
const auto x = node.get_input("X");
auto x = node.get_input("X");
auto dtype = node.get_attribute<ov::element::Type>("dtype", element::undefined);
const auto value = node.get_attribute<float>("value");
if (dtype == element::undefined) {
@@ -25,8 +26,8 @@ NamedOutputs fill_any_like(const NodeContext& node) {
});
PADDLE_OP_CHECK(node, valid_type, "Invalid dtype! Fill_any_like supports boolean, i16, i32, i64, f16, f32, f64");
const auto value_node = default_opset::Constant::create(dtype, {1}, {value});
x = get_1d_tensor(x);
const auto shape_node = std::make_shared<default_opset::ShapeOf>(x);

return node.default_single_output_mapping({std::make_shared<default_opset::Broadcast>(value_node, shape_node)},
{"Out"});
}
25 changes: 20 additions & 5 deletions src/frontends/paddle/src/op/fill_constant.cpp
@@ -16,15 +16,24 @@ NamedOutputs fill_constant(const NodeContext& node) {
Output<Node> shape_node;
if (node.has_input("ValueTensor")) {
value_node = node.get_input("ValueTensor");
} else if (dtype == element::boolean) {
bool value = static_cast<bool>(node.get_attribute<float>("value"));
value_node = opset6::Constant::create(dtype, {}, {value});
} else if (dtype == element::i32) {
int32_t value = static_cast<int32_t>(node.get_attribute<float>("value"));
value_node = opset6::Constant::create(dtype, {1}, {value});
value_node = opset6::Constant::create(dtype, {}, {value});
} else if (dtype == element::f16) {
float value = static_cast<float16>(node.get_attribute<float>("value"));
value_node = opset6::Constant::create(dtype, {}, {value});
} else if (dtype == element::f32) {
float value = node.get_attribute<float>("value");
value_node = opset6::Constant::create(dtype, {1}, {value});
value_node = opset6::Constant::create(dtype, {}, {value});
} else if (dtype == element::f64) {
double value = static_cast<double>(node.get_attribute<float>("value"));
value_node = opset6::Constant::create(dtype, {}, {value});
} else if (dtype == element::i64) {
int64_t value = static_cast<int64_t>(node.get_attribute<float>("value"));
value_node = opset6::Constant::create(dtype, {1}, {value});
value_node = opset6::Constant::create(dtype, {}, {value});
} else {
PADDLE_OP_CHECK(node, false, "fill_constant only supports i32, f32, i64");
}
@@ -34,7 +43,7 @@ NamedOutputs fill_constant(const NodeContext& node) {
}

PADDLE_OP_CHECK(node,
shape.size() > 0 || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"),
node.has_attribute("shape") || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"),
"fill_constant shape not set");

if (node.has_input("ShapeTensor")) {
@@ -50,7 +59,13 @@
}
shape_node = Output<Node>{std::make_shared<opset6::Concat>(shape_tensor_list, 0)};
} else {
shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape);
if (shape.empty()) {
NamedOutputs named_outputs;
named_outputs["Out"] = {value_node};
return named_outputs;
} else {
shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape);
}
}

return node.default_single_output_mapping({std::make_shared<ov::opset6::Broadcast>(value_node, shape_node)},
18 changes: 18 additions & 0 deletions src/frontends/paddle/src/op/reduce_any.cpp
@@ -0,0 +1,18 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "reduce_ops.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_any(const NodeContext& node_context) {
return reduce_ops<default_opset::ReduceLogicalOr>(node_context);
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov