diff --git a/CMakeLists.txt b/CMakeLists.txt
index ff68e2e0..5e8cb120 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,6 +2,24 @@
 cmake_minimum_required(VERSION 3.20)
 project(itlab_2023)
 
+option(ENABLE_STATISTIC_TENSORS "Enable statistic tensors" OFF)
+
+if(ENABLE_STATISTIC_TENSORS)
+  add_definitions(-DENABLE_STATISTIC_TENSORS)
+endif()
+
+option(ENABLE_STATISTIC_TIME "Enable statistic time" OFF)
+
+if(ENABLE_STATISTIC_TIME)
+  add_definitions(-DENABLE_STATISTIC_TIME)
+endif()
+
+option(ENABLE_STATISTIC_WEIGHTS "Enable statistic weights" OFF)
+
+if(ENABLE_STATISTIC_WEIGHTS)
+  add_definitions(-DENABLE_STATISTIC_WEIGHTS)
+endif()
+
 set(CMAKE_CXX_STANDARD 17)
 
 enable_testing()
diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp
index fc3e87e7..2c4800b3 100644
--- a/include/graph/graph.hpp
+++ b/include/graph/graph.hpp
@@ -1,8 +1,10 @@
 #pragma once
 #include
+#include <chrono>
 #include
 #include
 #include
+#include <vector>
 #include
 #include "layers/Layer.hpp"
 
@@ -19,6 +21,15 @@ class Graph {
   Tensor* outten_;
   int start_;
   int end_;
+#ifdef ENABLE_STATISTIC_TENSORS
+  std::vector<Tensor> tensors_;
+#endif
+#ifdef ENABLE_STATISTIC_TIME
+  std::vector<int> time_;
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  std::vector<Tensor> weights_;
+#endif
 
  public:
   Graph(int vertices) : BiggestSize_(vertices) {
@@ -90,13 +101,38 @@
       }
     }
     for (int i : traversal) {
+#ifdef ENABLE_STATISTIC_TIME
+      auto start = std::chrono::high_resolution_clock::now();
+#endif
      layers_[i]->run(inten_, *outten_);
+#ifdef ENABLE_STATISTIC_TENSORS
+      tensors_.push_back(inten_);
+      tensors_.push_back(*outten_);
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+      weights_.push_back(layers_[i]->get_weights());
+#endif
      inten_ = *outten_;
+#ifdef ENABLE_STATISTIC_TIME
+      auto end = std::chrono::high_resolution_clock::now();
+      auto elapsed =
+          std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+      time_.push_back(static_cast<int>(elapsed.count()));
+#endif
     }
   }
   void setOutput(const Layer& lay, Tensor& vec) {
     end_ = lay.getID();
     outten_ = &vec;
   }
+#ifdef ENABLE_STATISTIC_TENSORS
+  std::vector<Tensor> getTensors() { return tensors_; }
+#endif
+#ifdef ENABLE_STATISTIC_TIME
+  std::vector<int> getTime() { return time_; }
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  std::vector<Tensor> getWEIGHTS() { return weights_; }
+#endif
 };
 }  // namespace itlab_2023
diff --git a/include/layers/ConvLayer.hpp b/include/layers/ConvLayer.hpp
index b9df27bc..360e2bfe 100644
--- a/include/layers/ConvLayer.hpp
+++ b/include/layers/ConvLayer.hpp
@@ -24,6 +24,9 @@ class ConvolutionalLayer : public Layer {
     kernel_ = kernel;
   }
   void run(const Tensor& input, Tensor& output) override;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override { return kernel_; }
+#endif
 };
 
 template
diff --git a/include/layers/EWLayer.hpp b/include/layers/EWLayer.hpp
index 8254074a..4b2dfbcd 100644
--- a/include/layers/EWLayer.hpp
+++ b/include/layers/EWLayer.hpp
@@ -23,7 +23,13 @@ class EWLayer : public Layer {
       : func_(std::move(function)), alpha_(alpha), beta_(beta) {}
   static std::string get_name() { return "Element-wise layer"; }
   void run(const Tensor& input, Tensor& output) override;
-
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override {
+    std::vector<int> v = {0};
+    Tensor a = make_tensor(v);
+    return a;
+  }
+#endif
  private:
   std::string func_;
   float alpha_;
diff --git a/include/layers/FCLayer.hpp b/include/layers/FCLayer.hpp
index 5384f326..89414593 100644
--- a/include/layers/FCLayer.hpp
+++ b/include/layers/FCLayer.hpp
@@ -23,6 +23,9 @@ class FCLayer : public Layer {
       : weights_(std::move(weights)), bias_(bias), implType_(implType) {}
   static std::string get_name() { return "Fully-connected layer"; }
   void run(const Tensor& input, Tensor& output) override;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override { return weights_; }
+#endif
 };
 
 template
diff --git a/include/layers/InputLayer.hpp b/include/layers/InputLayer.hpp
index 6876062d..adb3e901 100644
--- a/include/layers/InputLayer.hpp
+++ b/include/layers/InputLayer.hpp
@@ -26,6 +26,13 @@ class InputLayer : public Layer {
     mean_ = mean;
     std_ = std;
   }  // layout = kNchw(0), kNhwc(1)
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override {
+    std::vector<int> v = {0};
+    Tensor a = make_tensor(v);
+    return a;
+  }
+#endif
   void run(const Tensor& input, Tensor& output) override {
     switch (input.get_type()) {
       case Type::kInt: {
diff --git a/include/layers/Layer.hpp b/include/layers/Layer.hpp
index e7dacfa0..0157803b 100644
--- a/include/layers/Layer.hpp
+++ b/include/layers/Layer.hpp
@@ -32,6 +32,9 @@ class Layer {
   LayerType getName() const { return type_; }
   void setName(LayerType type) { type_ = type; }
   virtual void run(const Tensor& input, Tensor& output) = 0;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  virtual Tensor get_weights() = 0;
+#endif
 
  private:
   int id_;
diff --git a/include/layers/OutputLayer.hpp b/include/layers/OutputLayer.hpp
index 77cd113e..e6f10dd6 100644
--- a/include/layers/OutputLayer.hpp
+++ b/include/layers/OutputLayer.hpp
@@ -15,6 +15,13 @@ class OutputLayer : public Layer {
   std::vector<std::string> get_labels() const { return labels_; }
   std::pair<std::vector<std::string>, Tensor> top_k(const Tensor& input,
                                                     size_t k) const;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override {
+    std::vector<int> v = {0};
+    Tensor a = make_tensor(v);
+    return a;
+  }
+#endif
 
  private:
   std::vector<std::string> labels_;
diff --git a/include/layers/PoolingLayer.hpp b/include/layers/PoolingLayer.hpp
index 4b82f40a..79b22ce5 100644
--- a/include/layers/PoolingLayer.hpp
+++ b/include/layers/PoolingLayer.hpp
@@ -20,6 +20,13 @@ class PoolingLayer : public Layer {
         implType_(implType) {}
   static std::string get_name() { return "Pooling layer"; }
   void run(const Tensor& input, Tensor& output) override;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override {
+    std::vector<int> v = {0};
+    Tensor a = make_tensor(v);
+    return a;
+  }
+#endif
 
  private:
   Shape poolingShape_;
diff --git a/test/inference/test_inference.cpp b/test/inference/test_inference.cpp
index 58f7ab8e..ac7f5c6e 100644
--- a/test/inference/test_inference.cpp
+++ b/test/inference/test_inference.cpp
@@ -36,6 +36,46 @@ TEST(bfs, check_result_vec) {
   graph.inference();
   std::vector<int> tmp = *output.as<int>();
   std::vector<int> res = {81, 81, 81};
+#ifdef ENABLE_STATISTIC_TENSORS
+  std::vector<Tensor> tensors = graph.getTensors();
+  for (int i = 0; i < tensors.size(); i++) {
+    std::vector<int> ten = *tensors[i].as<int>();
+    for (int j = 0; j < ten.size(); j++) {
+      std::cout << ten[j] << ' ';
+    }
+    std::cout << '\n';
+  }
+#endif
+#ifdef ENABLE_STATISTIC_TIME
+  std::vector<int> times = graph.getTime();
+  for (int j = 0; j < times.size(); j++) {
+    std::cout << times[j] << ' ';
+  }
+  std::cout << '\n';
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  std::vector<Tensor> weights = graph.getWEIGHTS();
+  for (int i = 0; i < weights.size(); i++) {
+    switch (weights[i].get_type()) {
+      case Type::kInt: {
+        std::vector<int> ten = *weights[i].as<int>();
+        for (int j = 0; j < ten.size(); j++) {
+          std::cout << ten[j] << ' ';
+        }
+        std::cout << '\n';
+        break;
+      }
+      case Type::kFloat: {
+        std::vector<float> ten = *weights[i].as<float>();
+        for (int j = 0; j < ten.size(); j++) {
+          std::cout << ten[j] << ' ';
+        }
+        std::cout << '\n';
+        break;
+      }
+    }
+  }
+#endif
   ASSERT_EQ(tmp, res);
 }
 
@@ -66,6 +106,29 @@ TEST(bfs, check_end_to_end) {
   graph.makeConnection(a5, a6);
   graph.setOutput(a5, output);
   graph.inference();
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  std::vector<Tensor> weights = graph.getWEIGHTS();
+  for (int i = 0; i < weights.size(); i++) {
+    switch (weights[i].get_type()) {
+      case Type::kInt: {
+        std::vector<int> ten = *weights[i].as<int>();
+        for (int j = 0; j < ten.size(); j++) {
+          std::cout << ten[j] << ' ';
+        }
+        std::cout << '\n';
+        break;
+      }
+      case Type::kFloat: {
+        std::vector<float> ten = *weights[i].as<float>();
+        for (int j = 0; j < ten.size(); j++) {
+          std::cout << ten[j] << ' ';
+        }
+        std::cout << '\n';
+        break;
+      }
+    }
+  }
+#endif
   std::vector<float> tmp = *output.as<float>();
   std::vector<float> tmp_output = softmax(*output.as<float>());
   std::vector<float> res(3, 21);
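
Usage note (not part of the patch): the three statistics are opt-in at configure time, e.g. cmake -DENABLE_STATISTIC_TENSORS=ON -DENABLE_STATISTIC_TIME=ON -DENABLE_STATISTIC_WEIGHTS=ON, and because add_definitions applies project-wide, the same #ifdef guards work in user code. Below is a minimal sketch of reading the collected data back after Graph::inference(); the helper name dump_statistics, the include path, and the time unit (microseconds, per the reconstruction above) are assumptions, while getTensors(), getTime() and getWEIGHTS() come from the patch itself.

// Sketch: reading the statistics collected by Graph::inference().
// Assumes the ENABLE_STATISTIC_* options were enabled at configure time and
// that include/ is on the include path; dump_statistics is a hypothetical helper.
#include <iostream>

#include "graph/graph.hpp"

using namespace itlab_2023;

void dump_statistics(Graph& graph) {
#ifdef ENABLE_STATISTIC_TIME
  // One entry per executed layer, pushed in traversal order.
  for (int t : graph.getTime()) std::cout << t << ' ';
  std::cout << '\n';
#endif
#ifdef ENABLE_STATISTIC_TENSORS
  // Input/output tensors are recorded pairwise for every executed layer.
  std::cout << graph.getTensors().size() << " tensors recorded\n";
#endif
#ifdef ENABLE_STATISTIC_WEIGHTS
  // One weight tensor per layer; layers without real weights return a dummy {0} tensor.
  std::cout << graph.getWEIGHTS().size() << " weight tensors recorded\n";
#endif
}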