Skip to content

Commit

Permalink
[core] Api 2.0/migrate Add operator to new API (#19984)
Browse files Browse the repository at this point in the history
* Migrate Add operator to new API

* Remove `visit_attributes` as it calls base impl

* Use shape inference to calculate broadcast shape
  • Loading branch information
praasz committed Sep 22, 2023
1 parent aa293c0 commit e7c1344
Show file tree
Hide file tree
Showing 9 changed files with 107 additions and 106 deletions.
5 changes: 0 additions & 5 deletions src/core/include/openvino/op/add.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,11 +38,6 @@ class OPENVINO_API Add : public util::BinaryElementwiseArithmetic {

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

bool visit_attributes(AttributeVisitor& visitor) override;

OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
Expand Down
27 changes: 17 additions & 10 deletions src/core/reference/include/openvino/reference/add.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,30 +4,37 @@

#pragma once

#include <algorithm>
#include <cstddef>
#include <functional>

#include "ngraph/shape_util.hpp"
#include "openvino/reference/autobroadcast_binop.hpp"

namespace ov {
namespace reference {
template <typename T>
void add(const T* arg0, const T* arg1, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = arg0[i] + arg1[i];
}

template <class T>
void add(const T* arg0, const T* arg1, T* out, const size_t count) {
std::transform(arg0, std::next(arg0, count), arg1, out, std::plus<T>());
}

template <typename T>
/**
* @brief Reference implementation of binary elementwise Add operator.
*
* @param arg0 Pointer to input 0 data.
* @param arg1 Pointer to input 1 data.
* @param out Pointer to output data.
* @param arg_shape0 Input 0 shape.
* @param arg_shape1 Input 1 shape.
* @param broadcast_spec Broadcast specification mode.
*/
template <class T>
void add(const T* arg0,
const T* arg1,
T* out,
const Shape& arg0_shape,
const Shape& arg1_shape,
const op::AutoBroadcastSpec& broadcast_spec) {
autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T {
return x + y;
});
autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::plus<T>());
}
} // namespace reference
} // namespace ov
3 changes: 2 additions & 1 deletion src/core/shape_inference/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME shape_inference)

target_include_directories(${TARGET_NAME} PUBLIC
$<BUILD_INTERFACE:${SHAPE_INFER_INCLUDE_DIR}>
$<BUILD_INTERFACE:${OV_CORE_INCLUDE_PATH}>)
$<BUILD_INTERFACE:${OV_CORE_INCLUDE_PATH}>
$<BUILD_INTERFACE:$<TARGET_PROPERTY:openvino::core::dev,INTERFACE_INCLUDE_DIRECTORIES>>)

ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

Expand Down
11 changes: 11 additions & 0 deletions src/core/shape_inference/include/utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -411,6 +411,17 @@ ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const IT
}
return out;
}

/**
 * @brief Infer the broadcast shape for an element-wise operator according to the broadcast specification stored in the operator.
*
* @param op Pointer to operator.
* @param first First input shape.
* @param second Second input shape.
*
* @return Result shape from inputs with applied broadcast specification.
*/
ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::Shape& first, const ov::Shape& second);
} // namespace op

/**
Expand Down
16 changes: 16 additions & 0 deletions src/core/shape_inference/src/utils.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "utils.hpp"

#include "eltwise_shape_inference.hpp"

namespace ov {
namespace op {

ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::Shape& first, const ov::Shape& second) {
return eltwise_shape_infer(op, std::vector<ov::PartialShape>{first, second}).front().to_shape();
}
} // namespace op
} // namespace ov
132 changes: 52 additions & 80 deletions src/core/src/op/add.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,111 +2,83 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph/op/add.hpp"
#include "openvino/op/add.hpp"

#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/reference/add.hpp"
#include "utils.hpp"

using namespace std;
using namespace ngraph;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ov {
namespace op {
namespace add {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec) {
ov::reference::add(arg0->get_data_ptr<ET>(),
arg1->get_data_ptr<ET>(),
out->get_data_ptr<ET>(),
arg0->get_shape(),
arg1->get_shape(),
broadcast_spec);
return true;
}
struct Evaluate : element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;

bool evaluate_add(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec) {
bool rc = true;
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_add, i8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, bf16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, f32, arg0, arg1, out, broadcast_spec);
default:
rc = false;
break;
template <element::Type_t ET>
static result_type visit(const Tensor& in0,
const Tensor& in1,
Tensor& out,
const AutoBroadcastSpec& broadcast_spec) {
using T = typename element_type_traits<ET>::value_type;
reference::add(in0.data<const T>(),
in1.data<const T>(),
out.data<T>(),
in0.get_shape(),
in1.get_shape(),
broadcast_spec);
return true;
}
return rc;
}
} // namespace
};
} // namespace add

// ------------------------------- v1 ------------------------------------------

op::v1::Add::Add(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
namespace v1 {
Add::Add(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) {
constructor_validate_and_infer_types();
}

bool op::v1::Add::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v1_Add_visit_attributes);
BinaryElementwiseArithmetic::visit_attributes(visitor);
return true;
}

shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Add::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_Add_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
return std::make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
}

bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v1_Add_evaluate);
return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);

bool op::v1::Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v1_Add_evaluate);
if (std::none_of(inputs.cbegin(), inputs.cend(), [](const ov::Tensor& t) {
return is_vector(t.get_shape()) && t.get_shape().front() == 0;
})) {
return BinaryElementwiseArithmetic::evaluate(outputs, inputs);
} else {
return true;
}
outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<add::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
get_autob());
}

bool op::v1::Add::has_evaluate() const {
bool Add::has_evaluate() const {
OV_OP_SCOPE(v1_Add_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i8:
case ngraph::element::i16:
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u8:
case ngraph::element::u16:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::bf16:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i8:
case element::i16:
case element::i32:
case element::i64:
case element::u8:
case element::u16:
case element::u32:
case element::u64:
case element::bf16:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v1
} // namespace op
} // namespace ov
4 changes: 2 additions & 2 deletions src/core/src/op/mod.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#include "element_visitor.hpp"
#include "itt.hpp"
#include "openvino/reference/mod.hpp"
#include "shape_util.hpp"
#include "utils.hpp"

namespace ov {
namespace op {
Expand Down Expand Up @@ -49,7 +49,7 @@ bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);

outputs[0].set_shape(ov::util::get_broadcast_shape(inputs[0].get_shape(), inputs[1].get_shape(), get_autob()));
outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
using namespace ov::element;
return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<mod::Evaluate>(inputs[0].get_element_type(),
inputs[0],
Expand Down
13 changes: 6 additions & 7 deletions src/core/src/op/xor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
#include "itt.hpp"
#include "openvino/op/logical_xor.hpp"
#include "openvino/reference/xor.hpp"
#include "shape_util.hpp"
#include "utils.hpp"

namespace ov {
namespace op {
Expand Down Expand Up @@ -37,18 +37,17 @@ bool input_supported_type(const element::Type& et) {
return et == element::boolean;
}

bool evaluate(TensorVector& outputs, const TensorVector& inputs, const AutoBroadcastSpec& broadcast_spec) {
bool evaluate(const Node* const op, TensorVector& outputs, const TensorVector& inputs) {
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);

outputs[0].set_shape(ov::util::get_broadcast_shape(inputs[0].get_shape(), inputs[1].get_shape(), broadcast_spec));

outputs[0].set_shape(infer_broadcast_shape(op, inputs[0].get_shape(), inputs[1].get_shape()));
using namespace ov::element;
return IfTypeOf<boolean>::apply<logxor::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
broadcast_spec);
op->get_autob());
}
} // namespace
} // namespace logxor
Expand All @@ -68,7 +67,7 @@ std::shared_ptr<Node> Xor::clone_with_new_inputs(const OutputVector& new_args) c
bool Xor::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Xor_evaluate);

return logxor::evaluate(outputs, inputs, get_autob());
return logxor::evaluate(this, outputs, inputs);
}

bool Xor::has_evaluate() const {
Expand All @@ -92,7 +91,7 @@ std::shared_ptr<Node> LogicalXor::clone_with_new_inputs(const OutputVector& new_
bool LogicalXor::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_LogicalXor_evaluate);

return logxor::evaluate(outputs, inputs, get_autob());
return logxor::evaluate(this, outputs, inputs);
}

bool LogicalXor::has_evaluate() const {
Expand Down
2 changes: 1 addition & 1 deletion src/core/tests/pass/constant_folding.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3847,7 +3847,7 @@ class MockAddOp : public ov::op::v1::Add {
const ov::op::AutoBroadcastSpec& auto_broadcast = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY))
: ov::op::v1::Add(arg0, arg1, auto_broadcast) {
ON_CALL(*this, evaluate).WillByDefault([this](ov::TensorVector& outputs, const ov::TensorVector& inputs) {
return ov::Node::evaluate(outputs, inputs);
return ov::op::v1::Add::evaluate(outputs, inputs);
});
}
MOCK_METHOD(bool,
Expand Down

0 comments on commit e7c1344

Please sign in to comment.