Implement Layer abstraction for all layers #56


Open · wants to merge 22 commits into main
1 change: 1 addition & 0 deletions app/CMakeLists.txt
@@ -1 +1,2 @@
add_subdirectory(example)
add_subdirectory(layer_example)
2 changes: 1 addition & 1 deletion app/example/CMakeLists.txt
@@ -1 +1 @@
add_executable(example main.cpp)
add_executable(example main.cpp)
11 changes: 11 additions & 0 deletions app/layer_example/CMakeLists.txt
@@ -0,0 +1,11 @@
set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary")

add_executable(Concat ConcatLayer.cpp)

include_directories(${ARM_DIR})
include_directories(${ARM_DIR}/include)
target_link_directories(Concat PUBLIC ${ARM_DIR}/build)

target_link_libraries(Concat arm_compute)

add_dependencies(Concat build_compute_library)
36 changes: 36 additions & 0 deletions app/layer_example/ConcatLayer.cpp
@@ -0,0 +1,36 @@
#include <iostream>
#include <vector>

#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "utils/Utils.h"

using namespace arm_compute;
using namespace utils;

int main() {
    Tensor input1, input2;
    Tensor output;
    std::vector<const ITensor *> input;

    const int input_width = 3;
    const int input_height = 3;
    const int axis = 2;  // concatenate along the channel dimension

    // Describe both inputs as 3x3x1 FP32 tensors, then back them with memory.
    input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));
    input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));

    input1.allocator()->allocate();
    input2.allocator()->allocate();

    fill_random_tensor(input1, 0.f, 1.f);
    fill_random_tensor(input2, 0.f, 1.f);

    input.push_back(&input1);
    input.push_back(&input2);

    // Configure the NEON concatenation; the output is allocated only after
    // configure() has inferred its shape (3x3x2 here).
    NEConcatenateLayer concat;
    concat.configure(input, &output, axis);
    output.allocator()->allocate();

    concat.run();

    output.print(std::cout);
    return 0;
}
36 changes: 36 additions & 0 deletions include/layer/layer.h
@@ -0,0 +1,36 @@
#ifndef LAYER_H
#define LAYER_H

#include <list>
#include <stdexcept>
#include <string>

#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "utils/Utils.h"

using namespace arm_compute;
using namespace utils;

struct LayerAttributes {
    int id = -1;
};

class Layer {
protected:
    int id_ = -1;

public:
    Layer() = default;
    explicit Layer(const LayerAttributes& attrs) : id_(attrs.id) {}
    virtual ~Layer() = default;
    void setID(int id) { id_ = id; }
    int getID() const { return id_; }
    virtual std::string getInfoString() const {
        return get_type_name() + " (id=" + std::to_string(id_) + ")";
    }
    // The default implementations throw, so a concrete layer only needs to
    // override the exec() arity it actually supports.
    virtual void exec(Tensor& input, Tensor& output) {
        throw std::runtime_error(get_type_name() + ": single-input exec() not supported.");
    }
    virtual void exec(Tensor& input1, Tensor& input2, Tensor& output) {
        throw std::runtime_error(get_type_name() + ": two-input exec() not supported.");
    }
    // virtual Shape get_output_shape() = 0;

    virtual std::string get_type_name() const = 0;
    void addNeighbor(Layer* neighbor) { neighbors_.push_back(neighbor); }
    void removeNeighbor(Layer* neighbor) { neighbors_.remove(neighbor); }
    std::list<Layer*> neighbors_;
};
#endif  // LAYER_H
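
To illustrate the extension point this interface provides, here is a minimal hypothetical layer built on it. ReluLayer is a sketch for review purposes only and is not part of this PR; it assumes layer.h above.

// Hypothetical minimal layer on top of the Layer interface (sketch, not in this PR).
class ReluLayer : public Layer {
public:
    explicit ReluLayer(int id) { setID(id); }

    void exec(Tensor& input, Tensor& output) override {
        // Give the output the input's shape, then run the NEON activation.
        output.allocator()->init(TensorInfo(input.info()->tensor_shape(), 1, DataType::F32));
        output.allocator()->allocate();
        NEActivationLayer relu;
        relu.configure(&input, &output,
                       ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        relu.run();
    }

    std::string get_type_name() const override { return "ReluLayer"; }
};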
54 changes: 54 additions & 0 deletions src/layer/ConcatenateLayer.cpp
@@ -0,0 +1,54 @@
#ifndef ACL_CONCATENATE_LAYER_H
#define ACL_CONCATENATE_LAYER_H

#include <stdexcept>
#include <string>
#include <vector>

#include "include/layer/layer.h"

class ConcatenateLayer : public Layer {
private:
    std::vector<TensorShape> input_shapes_config_;
    TensorShape output_shape_;
    unsigned int concatenation_axis_ = 0;
    bool configured_ = false;

public:
    explicit ConcatenateLayer(int id) { setID(id); }

    void configure(const std::vector<TensorShape>& inputs_shapes, unsigned int axis,
                   const TensorShape& output_shape) {
        if (inputs_shapes.empty()) {
            throw std::runtime_error("ConcatenateLayer: input shapes list cannot be empty.");
        }

        input_shapes_config_ = inputs_shapes;
        concatenation_axis_ = axis;
        output_shape_ = output_shape;
        configured_ = true;
    }

    void exec(const std::vector<const ITensor*>& input, Tensor& output) {
        if (!configured_) {
            throw std::runtime_error("ConcatenateLayer: layer not configured.");
        }
        if (input.size() != input_shapes_config_.size()) {
            throw std::runtime_error("ConcatenateLayer: number of inputs does not match the configured shapes.");
        }

        // Describe the output, configure the NEON kernel, then allocate and run.
        output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32));

        NEConcatenateLayer concat;
        concat.configure(input, &output, concatenation_axis_);
        output.allocator()->allocate();

        concat.run();
    }

    std::string get_type_name() const override {
        return "ConcatenateLayer";
    }
};

#endif  // ACL_CONCATENATE_LAYER_H
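
A usage sketch for review (shapes are illustrative; input1/input2 are assumed to be 3x3x1 FP32 tensors prepared as in app/layer_example/ConcatLayer.cpp):

// Illustrative usage of ConcatenateLayer; tensor setup elided.
ConcatenateLayer layer(0);
std::vector<TensorShape> shapes = {TensorShape(3, 3, 1), TensorShape(3, 3, 1)};
layer.configure(shapes, /*axis=*/2, TensorShape(3, 3, 2));

std::vector<const ITensor*> inputs = {&input1, &input2};
Tensor output;
layer.exec(inputs, output);  // output becomes the 3x3x2 concatenation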
79 changes: 79 additions & 0 deletions src/layer/ConvLayer.cpp
@@ -0,0 +1,79 @@
#ifndef ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
#define ACL_CONVOLUTION_LAYER_SIMPLIFIED_H

#include <stdexcept>
#include <string>

#include "include/layer/layer.h"

class ConvolutionLayer : public Layer {
private:
    TensorShape input_shape_;
    TensorShape weights_shape_;
    TensorShape biases_shape_;
    TensorShape output_shape_;
    Tensor weights_;  // layer-owned copies of the parameters
    Tensor biases_;
    PadStrideInfo psi_;

    bool configured_ = false;

public:
    explicit ConvolutionLayer(int id) { setID(id); }

    void configure(
        const TensorShape& input_s,
        const TensorShape& weights_s,
        Tensor& weights_t,
        const TensorShape& biases_s,
        Tensor& biases_t,
        const TensorShape& output_s,
        const PadStrideInfo& info
    ) {
        input_shape_ = input_s;
        weights_shape_ = weights_s;
        biases_shape_ = biases_s;
        psi_ = info;
        output_shape_ = output_s;

        // Allocate the layer-owned parameter tensors first, then copy the
        // caller's weights and biases into them (source -> destination).
        weights_.allocator()->init(TensorInfo(weights_shape_, 1, DataType::F32));
        biases_.allocator()->init(TensorInfo(biases_shape_, 1, DataType::F32));

        weights_.allocator()->allocate();
        biases_.allocator()->allocate();

        NECopy copyw, copyb;
        copyw.configure(&weights_t, &weights_);
        copyw.run();
        copyb.configure(&biases_t, &biases_);
        copyb.run();

        configured_ = true;
    }

    void exec(Tensor& input, Tensor& output) override {
        if (!configured_) {
            throw std::runtime_error("ConvolutionLayer: layer not configured.");
        }

        input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32));
        output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32));

        input.allocator()->allocate();
        output.allocator()->allocate();

        NEConvolutionLayer conv;
        conv.configure(&input, &weights_, &biases_, &output, psi_);
        conv.run();
    }

    std::string get_type_name() const override {
        return "ConvolutionLayer";
    }
};

#endif  // ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
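
A usage sketch for review (the shapes and the pre-filled `weights`/`biases` tensors are assumptions for illustration, not part of the diff):

// Illustrative usage of ConvolutionLayer. `weights` and `biases` are assumed to
// be pre-filled FP32 tensors; exec() initializes and allocates input/output
// itself in this design, so fresh tensors are passed in.
ConvolutionLayer conv(1);
conv.configure(TensorShape(8, 8, 3),                 // input  W x H x C
               TensorShape(3, 3, 3, 16), weights,    // 3x3 kernels, 3 ifm, 16 ofm
               TensorShape(16), biases,
               TensorShape(8, 8, 16),                // output
               PadStrideInfo(1, 1, 1, 1));           // stride 1, pad 1 ("same")
Tensor input, output;
conv.exec(input, output);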
142 changes: 142 additions & 0 deletions src/layer/ElementwiseLayer.cpp
@@ -0,0 +1,142 @@
#ifndef ACL_ELEMENTWISE_LAYER_H
#define ACL_ELEMENTWISE_LAYER_H

#include <stdexcept>
#include <string>

#include "include/layer/layer.h"

// Unary and binary elementwise operations backed by NEON kernels.
enum class ElementwiseOp {
    ADD,
    DIV,
    ABS,
    SIGM,
    SWISH,
    SQUARED_DIFF
};

class ElementwiseLayer : public Layer {
private:
    ElementwiseOp op_type_;
    TensorShape input1_shape_, input2_shape_;
    TensorShape output_shape_;
    bool configured_ = false;

public:
    ElementwiseLayer(int id, ElementwiseOp op) : op_type_(op) { setID(id); }

    ElementwiseLayer() : ElementwiseLayer(0, ElementwiseOp::ADD) {}

    // Unary configuration: the output always takes the input's shape.
    void configure(const TensorShape& input_shape) {
        input1_shape_ = input_shape;
        output_shape_ = input_shape;
        configured_ = true;
    }

    // Binary configuration: validate the parameters, not stale members.
    void configure(const TensorShape& input1_shape, const TensorShape& input2_shape,
                   const TensorShape& output_shape) {
        if (input1_shape.total_size() != input2_shape.total_size()) {
            throw std::runtime_error(
                "ElementwiseLayer: input shapes must have the same total size.");
        }

        input1_shape_ = input1_shape;
        input2_shape_ = input2_shape;
        output_shape_ = output_shape;
        configured_ = true;
    }

    void exec(Tensor& input, Tensor& output) override {
        if (!configured_) {
            throw std::runtime_error(
                "ElementwiseLayer: layer not configured before exec.");
        }

        input.allocator()->init(TensorInfo(input1_shape_, 1, DataType::F32));
        output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32));

        input.allocator()->allocate();
        output.allocator()->allocate();

        // Unary operations map onto NEActivationLayer.
        switch (op_type_) {
            case ElementwiseOp::ABS: {
                NEActivationLayer abs;
                abs.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS));
                abs.run();
                break;
            }
            case ElementwiseOp::SIGM: {
                NEActivationLayer sigm;
                sigm.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
                sigm.run();
                break;
            }
            case ElementwiseOp::SWISH: {
                NEActivationLayer swish;
                swish.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH));
                swish.run();
                break;
            }
            default:
                throw std::runtime_error(
                    "ElementwiseLayer: this operation requires two inputs.");
        }
    }

    void exec(Tensor& input1, Tensor& input2, Tensor& output) override {
        if (!configured_) {
            throw std::runtime_error(
                "ElementwiseLayer: layer not configured before exec.");
        }

        input1.allocator()->init(TensorInfo(input1_shape_, 1, DataType::F32));
        input2.allocator()->init(TensorInfo(input2_shape_, 1, DataType::F32));
        output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32));

        input1.allocator()->allocate();
        input2.allocator()->allocate();
        output.allocator()->allocate();

        switch (op_type_) {
            case ElementwiseOp::ADD: {
                NEArithmeticAddition add;
                add.configure(&input1, &input2, &output, ConvertPolicy::WRAP);
                add.run();
                break;
            }
            case ElementwiseOp::DIV: {
                NEElementwiseDivision div;
                div.configure(&input1, &input2, &output);
                div.run();
                break;
            }
            case ElementwiseOp::SQUARED_DIFF: {
                NEElementwiseSquaredDiff sqdiff;
                sqdiff.configure(&input1, &input2, &output);
                sqdiff.run();
                break;
            }
            default:
                throw std::runtime_error(
                    "ElementwiseLayer: this operation requires a single input.");
        }
    }

    std::string get_type_name() const override {
        switch (op_type_) {
            case ElementwiseOp::ADD: return "ElementwiseAddLayer";
            case ElementwiseOp::DIV: return "ElementwiseDivLayer";
            case ElementwiseOp::ABS: return "ElementwiseAbsLayer";
            case ElementwiseOp::SIGM: return "ElementwiseSigmoidLayer";
            case ElementwiseOp::SWISH: return "ElementwiseSwishLayer";
            case ElementwiseOp::SQUARED_DIFF: return "ElementwiseSquaredDiffLayer";
            default: return "ElementwiseUnknownLayer";
        }
    }
};

#endif  // ACL_ELEMENTWISE_LAYER_H
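
A usage sketch for review (shapes are illustrative; note that in the current design exec() initializes and allocates all three tensors itself, so fresh tensors are passed in):

// Illustrative usage of ElementwiseLayer: squared difference of two 3x3 tensors.
ElementwiseLayer sqdiff(2, ElementwiseOp::SQUARED_DIFF);
TensorShape shape(3, 3);
sqdiff.configure(shape, shape, shape);

Tensor a, b, out;
sqdiff.exec(a, b, out);  // exec() allocates a, b, and out internally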