Skip to content

Commit

Permalink
Invoke all ONNX tests (#502)
Browse files Browse the repository at this point in the history
* Integrated all tests;

* Redir folder;

* add report script;

* 1;

* update Reduce ops for opset18 and ground_truth results

---------

Co-authored-by: github <[email protected]>
Co-authored-by: Jilong Xue <[email protected]>
  • Loading branch information
3 people authored Feb 17, 2023
1 parent a11b9f3 commit 1e88dca
Show file tree
Hide file tree
Showing 15 changed files with 13,995 additions and 14 deletions.
53 changes: 53 additions & 0 deletions src/nnfusion/frontend/onnx_import/op/cast_like.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

//----------------------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//----------------------------------------------------------------------------------------------

#pragma once

#include "../util/util.hpp"
#include "core/node.hpp"

namespace nnfusion
{
namespace frontend
{
namespace onnx_import
{
namespace set_15
{
/// \brief Translate the ONNX CastLike op (opset 15).
///
/// Converts the first input tensor to the element type of the second
/// ("like") input by emitting an op::Convert node into the graph.
///
/// \param node_proto  ONNX node being imported; output(0) names the result.
/// \param all_ng_nodes map from ONNX value names to already-imported gnodes.
/// \param m_graph     graph the converted node is added to.
/// \return NamedNodeVector mapping the ONNX output name to the new gnode.
NamedNodeVector TranslateCastLikeOp(const onnx::NodeProto& node_proto,
                                    const NodeMap& all_ng_nodes,
                                    std::shared_ptr<nnfusion::graph::Graph> m_graph)
{
    auto input_gnode = GetInputNode(all_ng_nodes, node_proto, 0);
    auto type_gnode = GetInputNode(all_ng_nodes, node_proto, 1);
    // Target type is taken from the second ("like") input; its data is unused.
    element::Type et_type = type_gnode->get_element_type();

    auto op = std::make_shared<op::Convert>(et_type);
    op->set_name(node_proto.output(0));
    // Only the data input feeds the Convert node; the "like" input supplies
    // just the element type and is intentionally not wired as an edge.
    auto gnode = m_graph->add_node_and_edge(op, {input_gnode});
    NamedNodeVector ret{{node_proto.output(0), gnode}};
    return ret;
}
} // namespace set_15
} //namespace onnx_import
} // namespace frontend
} // namespace nnfusion
95 changes: 95 additions & 0 deletions src/nnfusion/frontend/onnx_import/op/reduce.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,101 @@ namespace nnfusion

} // namespace set_13

namespace set_18
{
// Opset 18 moved the reduction axes from a node attribute to an optional
// second input tensor; this translator handles that layout.
//
// PrologueOp is applied element-wise before the reduction (e.g. Abs for
// ReduceL1), ReduceOp performs the reduction itself (e.g. Sum), and
// EpilogueOp post-processes the reduced result (e.g. Sqrt for ReduceL2).
template <typename PrologueOp, typename ReduceOp, typename EpilogueOp>
NamedNodeVector TranslateReduceOp(const onnx::NodeProto& node_proto,
                                  const NodeMap& all_ng_nodes,
                                  std::shared_ptr<nnfusion::graph::Graph> m_graph)
{
    auto input_indexs = GetAllInputIndex(all_ng_nodes, node_proto);
    NNFUSION_CHECK(input_indexs.size() > 0);
    // input 0 is the data tensor to be reduced.
    auto input_index = input_indexs[0];
    auto input_shape = input_index.get_shape();

    Node node(node_proto);
    // keepdims defaults to 1 per the ONNX Reduce* spec.
    auto keepdims = node.get_attribute_value<int64>("keepdims", 1);

    std::vector<int64> axes;
    if (input_indexs.size() == 2)
    {
        // Optional input 1 holds the reduction axes.
        // NOTE(review): this presumably requires the axes input to be a
        // constant/initializer — TODO confirm GetValueFromNGraphOp's behavior
        // on non-constant inputs.
        GetValueFromNGraphOp<int64>(input_indexs[1].gnode, &axes);
    }

    if (axes.empty())
    {
        // no axes input
        auto noop_with_empty_axes =
            node.get_attribute_value<int64>("noop_with_empty_axes", 0);
        // When this attribute is true, the output tensor would be equivalent
        // to input tensor.
        // NOTE(review): the input gnode is returned directly without
        // set_name(node_proto.output(0)) — verify downstream name lookup
        // still resolves in this identity case.
        if (noop_with_empty_axes)
        {
            NamedNodeVector ret;
            ret.push_back({node_proto.output(0), input_index.gnode});
            return ret;
        }
    }

    nnfusion::AxisSet reduction_axes;
    {
        if (axes.empty())
        {
            // No axes given and noop_with_empty_axes is false:
            // reduce over all dimensions of the input.
            auto axes_uint = get_default_order(input_shape);
            std::copy(axes_uint.begin(),
                      axes_uint.end(),
                      std::inserter(reduction_axes, reduction_axes.end()));
        }
        else
        {
            for (auto axis : axes)
            {
                // Normalize negative axes by adding the input rank.
                reduction_axes.insert(axis += axis < 0 ? input_shape.size() : 0);
            }
        }
    }

    // Add prologue op
    auto pro_gnode = set_1::AddPrologueOrEpilogueOp<PrologueOp>(
        m_graph, input_index.gnode, reduction_axes);

    // The actual reduction (despite the name, ReduceOp need not be Sum).
    auto sum_op = std::make_shared<ReduceOp>(reduction_axes);
    auto sum_gnode = m_graph->add_node_and_edge(sum_op, {pro_gnode});

    // Add epilogue op
    auto epi_gnode = set_1::AddPrologueOrEpilogueOp<EpilogueOp>(
        m_graph, sum_gnode, reduction_axes);

    NamedNodeVector ret;
    if (keepdims)
    {
        // Restore reduced dimensions as size-1 dims via a Reshape, so the
        // output rank matches the input rank.
        nnfusion::Shape result_shape_with_keep(input_shape.size());

        for (size_t i = 0; i < input_shape.size(); i++)
        {
            result_shape_with_keep[i] =
                reduction_axes.count(i) == 0 ? input_shape[i] : 1;
        }
        // Identity axis order over the (already reduced) epilogue output.
        nnfusion::AxisVector axis_order(epi_gnode->get_shape().size());
        std::iota(axis_order.begin(), axis_order.end(), 0);
        auto reshape_op =
            std::make_shared<op::Reshape>(axis_order, result_shape_with_keep);
        reshape_op->set_name(node_proto.output(0));
        auto reshape_gnode = m_graph->add_node_and_edge(reshape_op, {epi_gnode});
        ret.push_back({node_proto.output(0), reshape_gnode});
    }
    else
    {
        // No reshape needed; just give the final node the ONNX output name.
        epi_gnode->get_op_ptr()->set_name(node_proto.output(0));
        ret.push_back({node_proto.output(0), epi_gnode});
    }

    return ret;
}

} // namespace set_18

} // namespace onnx_import

} // namespace frontend
Expand Down
20 changes: 20 additions & 0 deletions src/nnfusion/frontend/onnx_import/ops_bridge.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
#include "op/bias_gelu.hpp"
#include "op/binaryop.hpp"
#include "op/cast.hpp"
#include "op/cast_like.hpp"
#include "op/clip.hpp"
#include "op/concat.hpp"
#include "op/const_of_shape.hpp"
Expand Down Expand Up @@ -187,6 +188,7 @@ namespace nnfusion
REGISTER_OPERATOR("Cast", 6, TranslateCastOp);
REGISTER_OPERATOR("Cast", 9, TranslateCastOp);
REGISTER_OPERATOR("Cast", 13, TranslateCastOp);
REGISTER_OPERATOR("CastLike", 15, TranslateCastLikeOp);
REGISTER_OPERATOR("Ceil", 1, TranslateUnaryOp<op::Ceiling>);
REGISTER_OPERATOR("Ceil", 6, TranslateUnaryOp<op::Ceiling>);
REGISTER_OPERATOR("Ceil", 13, TranslateUnaryOp<op::Ceiling>);
Expand Down Expand Up @@ -351,24 +353,32 @@ namespace nnfusion
"ReduceL1", 11, PACK(TranslateReduceOp<op::Abs, op::Sum, op::NoOp>));
REGISTER_OPERATOR(
"ReduceL1", 13, PACK(TranslateReduceOp<op::Abs, op::Sum, op::NoOp>));
REGISTER_OPERATOR(
"ReduceL1", 18, PACK(TranslateReduceOp<op::Abs, op::Sum, op::NoOp>));
REGISTER_OPERATOR(
"ReduceL2", 1, PACK(TranslateReduceOp<op::Square, op::Sum, op::Sqrt>));
REGISTER_OPERATOR(
"ReduceL2", 11, PACK(TranslateReduceOp<op::Square, op::Sum, op::Sqrt>));
REGISTER_OPERATOR(
"ReduceL2", 13, PACK(TranslateReduceOp<op::Square, op::Sum, op::Sqrt>));
REGISTER_OPERATOR(
"ReduceL2", 18, PACK(TranslateReduceOp<op::Square, op::Sum, op::Sqrt>));
REGISTER_OPERATOR(
"ReduceLogSum", 1, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSum", 11, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSum", 13, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSum", 18, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSumExp", 1, PACK(TranslateReduceOp<op::Exp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSumExp", 11, PACK(TranslateReduceOp<op::Exp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSumExp", 13, PACK(TranslateReduceOp<op::Exp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceLogSumExp", 18, PACK(TranslateReduceOp<op::Exp, op::Sum, op::Log>));
REGISTER_OPERATOR(
"ReduceMax", 1, PACK(TranslateReduceOp<op::NoOp, op::Max, op::NoOp>));
REGISTER_OPERATOR(
Expand All @@ -377,12 +387,16 @@ namespace nnfusion
"ReduceMax", 12, PACK(TranslateReduceOp<op::NoOp, op::Max, op::NoOp>));
REGISTER_OPERATOR(
"ReduceMax", 13, PACK(TranslateReduceOp<op::NoOp, op::Max, op::NoOp>));
REGISTER_OPERATOR(
"ReduceMax", 18, PACK(TranslateReduceOp<op::NoOp, op::Max, op::NoOp>));
REGISTER_OPERATOR(
"ReduceMean", 1, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Divide>));
REGISTER_OPERATOR(
"ReduceMean", 11, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Divide>));
REGISTER_OPERATOR(
"ReduceMean", 13, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Divide>));
REGISTER_OPERATOR(
"ReduceMean", 18, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::Divide>));
REGISTER_OPERATOR(
"ReduceMin", 1, PACK(TranslateReduceOp<op::NoOp, op::Min, op::NoOp>));
REGISTER_OPERATOR(
Expand All @@ -391,12 +405,16 @@ namespace nnfusion
"ReduceMin", 12, PACK(TranslateReduceOp<op::NoOp, op::Min, op::NoOp>));
REGISTER_OPERATOR(
"ReduceMin", 13, PACK(TranslateReduceOp<op::NoOp, op::Min, op::NoOp>));
REGISTER_OPERATOR(
"ReduceMin", 18, PACK(TranslateReduceOp<op::NoOp, op::Min, op::NoOp>));
REGISTER_OPERATOR(
"ReduceProd", 1, PACK(TranslateReduceOp<op::NoOp, op::Product, op::NoOp>));
REGISTER_OPERATOR(
"ReduceProd", 11, PACK(TranslateReduceOp<op::NoOp, op::Product, op::NoOp>));
REGISTER_OPERATOR(
"ReduceProd", 13, PACK(TranslateReduceOp<op::NoOp, op::Product, op::NoOp>));
REGISTER_OPERATOR(
"ReduceProd", 18, PACK(TranslateReduceOp<op::NoOp, op::Product, op::NoOp>));
REGISTER_OPERATOR(
"ReduceSum", 1, PACK(TranslateReduceOp<op::NoOp, op::Sum, op::NoOp>));
REGISTER_OPERATOR(
Expand All @@ -408,6 +426,8 @@ namespace nnfusion
"ReduceSumSquare", 11, PACK(TranslateReduceOp<op::Square, op::Sum, op::NoOp>));
REGISTER_OPERATOR(
"ReduceSumSquare", 13, PACK(TranslateReduceOp<op::Square, op::Sum, op::NoOp>));
REGISTER_OPERATOR(
"ReduceSumSquare", 18, PACK(TranslateReduceOp<op::Square, op::Sum, op::NoOp>));
REGISTER_OPERATOR("Relu", 1, TranslateUnaryOp<op::Relu>);
REGISTER_OPERATOR("Reshape", 1, TranslateReshapeOp);
REGISTER_OPERATOR("ReshapeGrad", 1, TranslateReshapeGradOp);
Expand Down
26 changes: 14 additions & 12 deletions test/python/default_operators.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ ArgMin
Asin
Atan
AveragePool
BatchNormalization
BatchNorm
Cast
Ceil
Clip
Expand Down Expand Up @@ -38,7 +38,7 @@ GreaterOrEqual
Identity
InstanceNormalization
LSTM
LayerNormalization
Layer_Normalization
LeakyRelu
Less
LessOrEqual
Expand All @@ -57,23 +57,25 @@ OneHot
Or
Pow
Range
ReduceL1
ReduceL2
ReduceLogSum
ReduceLogSumExp
ReduceMax
ReduceMean
ReduceMin
ReduceProd
ReduceSum
ReduceSumSquare
Reciprocal
Reduce_L1
Reduce_L2
Reduce_Log_Sum
Reduce_Log_Sum_Exp
Reduce_Max
Reduce_Mean
Reduce_Min
Reduce_Prod
Reduce_Sum
Reduce_Sum_Square
Relu
Reshape
Resize
ScatterND
Shape
Sigmoid
Sin
Size
Slice
Softmax
SoftmaxCrossEntropyLoss
Expand Down
Loading

0 comments on commit 1e88dca

Please sign in to comment.