
Commit

update
bukejiyu committed Jan 16, 2025
1 parent 3813234 commit 9ac73ff
Showing 7 changed files with 21 additions and 24 deletions.
4 changes: 2 additions & 2 deletions src/frontends/paddle/src/op/elementwise_ops.cpp
@@ -74,8 +74,8 @@ NamedOutputs elementwise_floordiv(const NodeContext& node_context) {
     if (pd_version >= 2005000 || pd_version == 0) {
         python_div = true;
     }
-    x = get_1d_tensor(x);
-    y = get_1d_tensor(y);
+    x = get_tensor_safe(x);
+    y = get_tensor_safe(y);
     return node_context.default_single_output_mapping(
         {std::make_shared<default_opset::Divide>(x,
                                                  y,
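Note on the surrounding logic (not part of the change itself): for Paddle >= 2.5.0 the converter switches to Python floor-division semantics before building the Divide node whose argument list is truncated above. As a minimal, hypothetical sketch of what Python-style floor division looks like when composed from OpenVINO ops (correct for floating-point operands; integer operands would additionally need Python division semantics):

    // Hypothetical sketch, not the converter's actual code: floor(x / y).
    #include "openvino/op/divide.hpp"
    #include "openvino/op/floor.hpp"

    ov::Output<ov::Node> floor_divide_sketch(const ov::Output<ov::Node>& x, const ov::Output<ov::Node>& y) {
        // Elementwise quotient; for integer inputs Divide truncates unless pythondiv is requested.
        const auto quotient = std::make_shared<ov::op::v1::Divide>(x, y);
        // Round the quotient toward negative infinity, matching Python's // for floats.
        return std::make_shared<ov::op::v0::Floor>(quotient);
    }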
2 changes: 1 addition & 1 deletion src/frontends/paddle/src/op/fill_any_like.cpp
@@ -26,7 +26,7 @@ NamedOutputs fill_any_like(const NodeContext& node) {
     });
     PADDLE_OP_CHECK(node, valid_type, "Invalid dtype! Fill_any_like supports boolean, i16, i32, i64, f16, f32, f64");
     const auto value_node = default_opset::Constant::create(dtype, {1}, {value});
-    x = get_1d_tensor(x);
+    x = get_tensor_safe(x);
     const auto shape_node = std::make_shared<default_opset::ShapeOf>(x);
     return node.default_single_output_mapping({std::make_shared<default_opset::Broadcast>(value_node, shape_node)},
                                               {"Out"});
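For context, the pattern in this converter (unchanged apart from the helper call) is: build a one-element constant for the fill value, take ShapeOf of the input, and Broadcast the constant to that shape, so dynamic input shapes are handled as well. A self-contained sketch of that pattern, with hypothetical names and a hard-coded f32 element type:

    // Sketch only: produce a tensor filled with `value` that matches the shape of `x`.
    #include "openvino/op/broadcast.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/shape_of.hpp"

    ov::Output<ov::Node> fill_like_sketch(const ov::Output<ov::Node>& x, float value) {
        // One-element constant holding the fill value (f32 assumed for the sketch).
        const auto value_node = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {value});
        // Runtime shape of x, so dynamic dimensions are supported.
        const auto shape_node = std::make_shared<ov::op::v3::ShapeOf>(x);
        // Broadcast the single value to that shape.
        return std::make_shared<ov::op::v3::Broadcast>(value_node, shape_node);
    }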
1 change: 0 additions & 1 deletion src/frontends/paddle/src/op/scatter.cpp
@@ -4,7 +4,6 @@
 #include "default_opset.hpp"
 #include "openvino/frontend/paddle/node_context.hpp"
 #include "openvino/opsets/opset15.hpp"
-#include "openvino/opsets/opset4.hpp"
 
 namespace ov {
 namespace frontend {
32 changes: 15 additions & 17 deletions src/frontends/paddle/src/op/set_value.cpp
@@ -26,10 +26,10 @@ std::shared_ptr<Node> handle_maximum_index(Output<Node>& node, const Output<Node
     return std::make_shared<default_opset::Select>(mask, update_node, node);
 }
 
-void normalize(std::vector<int64_t>& vec, const Output<Node> intput, const std::vector<int64_t> axes_vec) {
+void normalize(std::vector<int64_t>& vec, const Output<Node> input, const std::vector<int64_t> axes_vec) {
     for (size_t i = 0; i < axes_vec.size(); i++) {
         if (vec[i] < 0) {
-            auto x_dim = std::stoll(intput.get_partial_shape()[axes_vec[i]].to_string());
+            auto x_dim = std::stoll(input.get_partial_shape()[axes_vec[i]].to_string());
             vec[i] = vec[i] + x_dim;
         }
     }
@@ -53,46 +53,44 @@ NamedOutputs set_value(const NodeContext& node) {
         value_node = node.get_input("ValueTensor");
     } else {
         auto value_shape = node.get_attribute<std::vector<int64_t>>("shape");
-        auto intput_type = node.get_attribute<ov::element::Type>("dtype");
+        auto input_type = node.get_attribute<ov::element::Type>("dtype");
 
-        if (intput_type == ov::element::i32) {
+        if (input_type == ov::element::i32) {
             if (node.has_attribute("int32_values")) {
                 auto value_arrt = node.get_attribute<std::vector<int32_t>>("int32_values");
-                value_node = {default_opset::Constant::create(intput_type,
+                value_node = {default_opset::Constant::create(input_type,
                                                               Shape{value_shape.begin(), value_shape.end()},
                                                               value_arrt)};
             } else {
                 auto value_arrt = node.get_attribute<std::vector<int64_t>>("values");
                 auto int32_value = std::vector<int32_t>(value_arrt.begin(), value_arrt.end());
-                value_node = {default_opset::Constant::create(intput_type,
+                value_node = {default_opset::Constant::create(input_type,
                                                               Shape{value_shape.begin(), value_shape.end()},
                                                               int32_value)};
             }
-        } else if (intput_type == ov::element::i64) {
+        } else if (input_type == ov::element::i64) {
             auto value_arrt = node.has_attribute("values") ? node.get_attribute<std::vector<int64_t>>("values")
                                                            : node.get_attribute<std::vector<int64_t>>("int64_values");
-            value_node = {default_opset::Constant::create(intput_type,
-                                                          Shape{value_shape.begin(), value_shape.end()},
-                                                          value_arrt)};
-        } else if (intput_type == ov::element::f32) {
+            value_node = {
+                default_opset::Constant::create(input_type, Shape{value_shape.begin(), value_shape.end()}, value_arrt)};
+        } else if (input_type == ov::element::f32) {
             if (node.has_attribute("fp32_values")) {
                 auto value_arrt = node.get_attribute<std::vector<float>>("fp32_values");
-                value_node = {default_opset::Constant::create(intput_type,
+                value_node = {default_opset::Constant::create(input_type,
                                                               Shape{value_shape.begin(), value_shape.end()},
                                                               value_arrt)};
             } else {
                 auto value_arrt = node.get_attribute<std::vector<double>>("values");
                 auto fp32_value = std::vector<float>(value_arrt.begin(), value_arrt.end());
-                value_node = {default_opset::Constant::create(intput_type,
+                value_node = {default_opset::Constant::create(input_type,
                                                               Shape{value_shape.begin(), value_shape.end()},
                                                               fp32_value)};
             }
-        } else if (intput_type == ov::element::f64) {
+        } else if (input_type == ov::element::f64) {
             auto value_arrt = node.has_attribute("values") ? node.get_attribute<std::vector<double>>("values")
                                                            : node.get_attribute<std::vector<double>>("fp64_values");
-            value_node = {default_opset::Constant::create(intput_type,
-                                                          Shape{value_shape.begin(), value_shape.end()},
-                                                          value_arrt)};
+            value_node = {
+                default_opset::Constant::create(input_type, Shape{value_shape.begin(), value_shape.end()}, value_arrt)};
         } else {
            PADDLE_OP_CHECK(node, false, "assign_value only supports int32, int64, float32, float64");
        }
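Aside from the spelling fix, the normalize helper above is a standard wrap-around of Python-style negative indices: a negative start/end index is shifted by the size of the dimension it refers to. A detached sketch of that arithmetic, with hypothetical names and plain integers instead of OpenVINO shapes:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch: wrap negative indices, e.g. index -1 on a dimension of size 8 becomes 7.
    // dim_sizes[i] is the size of the dimension that indices[i] refers to.
    void normalize_indices_sketch(std::vector<int64_t>& indices, const std::vector<int64_t>& dim_sizes) {
        for (std::size_t i = 0; i < indices.size(); ++i) {
            if (indices[i] < 0) {
                indices[i] += dim_sizes[i];
            }
        }
    }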
2 changes: 1 addition & 1 deletion src/frontends/paddle/src/op/tile.cpp
@@ -12,7 +12,7 @@ namespace paddle {
 namespace op {
 NamedOutputs tile(const NodeContext& node) {
     auto x = node.get_input("X");
-    x = get_1d_tensor(x);
+    x = get_tensor_safe(x);
     auto x_dims = static_cast<int64_t>(x.get_partial_shape().rank().get_length());
     Output<Node> repeats;
     if (node.has_input("RepeatTimes")) {
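The rest of this converter (truncated above) takes the repeat counts either from a runtime RepeatTimes input or from an attribute and feeds them into a Tile node; the x_dims rank is presumably used to reconcile the two. A minimal sketch of the constant-repeats path, with hypothetical names:

    // Sketch only: tile `x` by a fixed vector of repeat counts.
    #include <cstdint>
    #include <vector>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/tile.hpp"

    ov::Output<ov::Node> tile_sketch(const ov::Output<ov::Node>& x, const std::vector<int64_t>& repeat_times) {
        // Repeats as a 1-D i64 constant, one entry per tiled dimension.
        const auto repeats =
            ov::op::v0::Constant::create(ov::element::i64, ov::Shape{repeat_times.size()}, repeat_times);
        return std::make_shared<ov::op::v0::Tile>(x, repeats);
    }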
2 changes: 1 addition & 1 deletion src/frontends/paddle/src/op_utils.cpp
@@ -29,7 +29,7 @@ Output<Node> get_tensor_list(const OutputVector& node) {
     return res;
 }
 
-Output<Node> get_1d_tensor(const Output<Node>& node) {
+Output<Node> get_tensor_safe(const Output<Node>& node) {
     auto node_dim = node.get_partial_shape().rank().get_length();
     if (node_dim == 0) {
         return std::make_shared<op::default_opset::Unsqueeze>(
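The hunk is cut off at the Unsqueeze call, but the visible part makes the intent of the rename clear: the helper does not force tensors to 1-D in general, it only promotes rank-0 (scalar) outputs so that ops expecting at least one dimension stay valid, and presumably returns other inputs untouched. A sketch of such a helper under that assumption (the actual tail of the function is not shown here):

    // Sketch only: promote a 0-D output to 1-D, otherwise pass it through unchanged.
    #include "openvino/op/constant.hpp"
    #include "openvino/op/unsqueeze.hpp"

    ov::Output<ov::Node> get_tensor_safe_sketch(const ov::Output<ov::Node>& node) {
        const auto rank = node.get_partial_shape().rank().get_length();
        if (rank == 0) {
            // Assumed continuation of the truncated branch: unsqueeze along axis 0.
            const auto zero_axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0});
            return std::make_shared<ov::op::v0::Unsqueeze>(node, zero_axis);
        }
        return node;  // already has at least one dimension
    }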
2 changes: 1 addition & 1 deletion src/frontends/paddle/src/op_utils.hpp
@@ -11,7 +11,7 @@ namespace frontend {
 namespace paddle {
 
 Output<Node> get_tensor_list(const OutputVector& node);
-Output<Node> get_1d_tensor(const Output<Node>& node);
+Output<Node> get_tensor_safe(const Output<Node>& node);
 } // namespace paddle
 } // namespace frontend
 } // namespace ov
