Merge branch 'main' into AndreySorokin7/accuracy_verification

AndreySorokin7 authored Aug 30, 2024
2 parents da41482 + dfa1fb9 commit 600b40c
Showing 34 changed files with 966 additions and 177 deletions.
5 changes: 4 additions & 1 deletion .gitmodules
@@ -6,4 +6,7 @@
 	url = https://github.com/opencv/opencv
 [submodule "3rdparty/TBB"]
 	path = 3rdparty/TBB
-	url = https://github.com/oneapi-src/oneTBB.git
+	url = https://github.com/oneapi-src/oneTBB
+[submodule "3rdparty/Json"]
+	path = 3rdparty/Json
+	url = https://github.com/nlohmann/json
1 change: 1 addition & 0 deletions 3rdparty/Json
Submodule Json added at 8c391e
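The new nlohmann/json dependency is header-only and is consumed through the single `nlohmann/json.hpp` include in the reader code added below. As a minimal, self-contained sketch of the parse-and-iterate pattern that code relies on (the JSON literal here is illustrative, not the real model file):

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
  // Illustrative layer -> weights mapping, shaped like the parser.py output.
  json j = json::parse(R"({"conv1": [[0.1, 0.2], [0.3, 0.4]]})");
  for (const auto& item : j.items()) {
    std::cout << item.key() << ": " << item.value().dump() << "\n";
  }
  return 0;
}
```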
18 changes: 18 additions & 0 deletions CMakeLists.txt
@@ -2,6 +2,24 @@ cmake_minimum_required(VERSION 3.20)
 
 project(itlab_2023)
 
+option(ENABLE_STATISTIC_TENSORS "Enable statistic tensors" OFF)
+
+if(ENABLE_STATISTIC_TENSORS)
+  add_definitions(-DENABLE_STATISTIC_TENSORS)
+endif()
+
+option(ENABLE_STATISTIC_TIME "Enable statistic time" OFF)
+
+if(ENABLE_STATISTIC_TIME)
+  add_definitions(-DENABLE_STATISTIC_TIME)
+endif()
+
+option(ENABLE_STATISTIC_WEIGHTS "Enable statistic weights" OFF)
+
+if(ENABLE_STATISTIC_WEIGHTS)
+  add_definitions(-DENABLE_STATISTIC_WEIGHTS)
+endif()
+
 set(CMAKE_CXX_STANDARD 17)
 
 enable_testing()
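Each option maps to a compile definition, so the statistics code added in this commit compiles out entirely when the option is OFF (configure with, e.g., cmake -DENABLE_STATISTIC_TIME=ON). A minimal sketch of the guard pattern; the function itself is illustrative, not part of the commit:

```cpp
#include <chrono>
#include <iostream>

// Illustrative only: the kind of code the ENABLE_STATISTIC_TIME definition gates.
void timed_step() {
#ifdef ENABLE_STATISTIC_TIME
  auto start = std::chrono::high_resolution_clock::now();
#endif
  // ... the actual layer work would run here ...
#ifdef ENABLE_STATISTIC_TIME
  auto end = std::chrono::high_resolution_clock::now();
  auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
  std::cout << ms.count() << " ms\n";
#endif
}

int main() { timed_step(); }
```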
2 changes: 2 additions & 0 deletions README.md
@@ -117,5 +117,7 @@ To start the testing process locally, you need to go to the directory
 # **Some files used to create the library**
 ### *neural network models*
 [Alexnet-model.h5](https://github.com/moizahmed97/Convolutional-Neural-Net-Designer/blob/master/AlexNet-model.h5)
+# **How do I get a file with the model weights to launch the inference?**
+Run the parser.py script in app/AlexNet to read the weights from AlexNet-model.h5; the resulting JSON file with the weights is stored in the docs folder.
 # **Structure of our library**
 ![Class diagram](./docs/class_diagram.svg)
6 changes: 6 additions & 0 deletions app/AlexNet/CMakeLists.txt
@@ -0,0 +1,6 @@
+add_executable(Reader_weights reader_weights_sample.cpp)
+
+target_link_libraries(Reader_weights PUBLIC perf_lib layers_lib reader_lib)
+
+add_definitions(-DMODEL_PATH="${CMAKE_SOURCE_DIR}/docs/model_data_alexnet_1.json")
+
34 changes: 21 additions & 13 deletions app/AlexNet/parser.py
@@ -1,38 +1,46 @@
 import tensorflow as tf
 from tensorflow.keras.models import load_model
-import pickle
-import joblib
-# Path to your .h5 model
-MODEL_PATH = 'AlexNet-model.h5'
+import json
+import numpy as np
+import os
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+MODEL_PATH = os.path.join(BASE_DIR, 'docs', 'AlexNet-model.h5')
+MODEL_DATA_PATH = os.path.join(BASE_DIR, 'docs', 'model_data_alexnet_1.json')
 
 
 # Load the model
 model = load_model(MODEL_PATH)
 
 # Get the model graph
 graph = tf.compat.v1.get_default_graph()
 
 # Get the model weights
 weights = model.get_weights()
 
-MODEL_DATA_PATH = 'model_data_alexnet.joblib'
-
 # Save the layer names and model weights
 layer_weights = {}
 for layer in model.layers:
     layer_name = layer.name
-    layer_weights[layer_name] = layer.get_weights()
+    # Convert the weights to lists for JSON compatibility
+    layer_weights[layer_name] = [w.tolist() for w in layer.get_weights()]
 
-with open(MODEL_DATA_PATH, 'wb') as f:
-    joblib.dump(layer_weights, f)
+# Save the data to a JSON file
+with open(MODEL_DATA_PATH, 'w') as f:
+    json.dump(layer_weights, f, indent=2)  # indent for better readability
 
 print(f"Model data saved to {MODEL_DATA_PATH}")
 
 # Load the data
-loaded_model_data = joblib.load(MODEL_DATA_PATH)
+with open(MODEL_DATA_PATH, 'r') as f:
+    loaded_model_data = json.load(f)
+
+# Convert the data back to numpy arrays
+for layer_name, weights in loaded_model_data.items():
+    loaded_model_data[layer_name] = [np.array(w) for w in weights]
 
 # Print the data
 print("Model layers and weights:")
 for layer_name, weights in loaded_model_data.items():
     print("Layer:", layer_name)
-    print("Weights:", weights)
+    for weight in weights:
+        print(weight)
+    print()
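Each entry written by parser.py is a list of weight arrays. For Keras layers that carry parameters, get_weights() conventionally returns [kernel, bias], so the first element of a layer's list is the kernel array and, when present, the second is the bias vector. A minimal hedged sketch of pulling those arrays back out on the C++ side with nlohmann/json, independent of the project's reader helpers (the file path is an assumption for illustration):

```cpp
#include <fstream>
#include <iostream>
#include <vector>

#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
  std::ifstream in("docs/model_data_alexnet_1.json");  // assumed location
  const json model_data = json::parse(in);

  for (const auto& layer : model_data.items()) {
    const json& arrays = layer.value();
    std::cout << "Layer " << layer.key() << ": " << arrays.size()
              << " weight array(s)";
    if (arrays.size() == 2 && arrays[1].is_array()) {
      // Keras convention: arrays[0] is the kernel, arrays[1] the bias vector.
      std::vector<float> bias = arrays[1].get<std::vector<float>>();
      std::cout << ", bias length " << bias.size();
    }
    std::cout << "\n";
  }
  return 0;
}
```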
23 changes: 23 additions & 0 deletions app/AlexNet/reader_weights_sample.cpp
@@ -0,0 +1,23 @@
+#include <iostream>
+
+#include "Weights_Reader/reader_weights.hpp"
+
+int main() {
+  std::string json_file = MODEL_PATH;
+  json model_data = read_json(json_file);
+
+  for (auto& layer : model_data.items()) {
+    std::string layer_name = layer.key();
+    std::cout << "Layer: " << layer_name << std::endl;
+
+    try {
+      Tensor tensor = create_tensor_from_json(layer.value(), Type::kFloat);
+      std::cout << tensor << std::endl;
+    } catch (const std::exception& e) {
+      std::cerr << "Error processing layer " << layer_name << ": " << e.what()
+                << std::endl;
+    }
+  }
+
+  return 0;
+}
2 changes: 2 additions & 0 deletions app/CMakeLists.txt
@@ -1,2 +1,4 @@
 add_subdirectory(ReaderImage)
 
+add_subdirectory(Accuracy)
+add_subdirectory(AlexNet)
Binary file added docs/AlexNet-model.h5
3 changes: 3 additions & 0 deletions include/CMakeLists.txt
@@ -6,3 +6,6 @@ set(LAYERS_HEADERS "${layers_headers}" PARENT_SCOPE)
 
 file(GLOB_RECURSE perf_headers perf/*.h perf/*.hpp)
 set(PERF_HEADERS "${perf_headers}" PARENT_SCOPE)
+
+file(GLOB_RECURSE reader_headers Weights_Reader/*.h Weights_Reader/*.hpp)
+set(READER_HEADERS "${reader_headers}" PARENT_SCOPE)
16 changes: 16 additions & 0 deletions include/Weights_Reader/reader_weights.hpp
@@ -0,0 +1,16 @@
+#include <nlohmann/json.hpp>
+#include <string>
+#include <vector>
+
+#include "layers/Tensor.hpp"
+
+using json = nlohmann::json;
+using namespace itlab_2023;
+
+json read_json(const std::string& filename);
+void extract_values_without_bias(const json& j, std::vector<float>& values);
+void extract_values_from_json(const json& j, std::vector<float>& values);
+void parse_json_shape(const json& j, std::vector<size_t>& shape,
+                      size_t dim = 0);
+Tensor create_tensor_from_json(const json& j, Type type);
+void extract_bias_from_json(const json& j, std::vector<float>& bias);
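The header only declares these helpers; their implementations live elsewhere in the tree. As a hedged sketch of what a shape parse over nested JSON arrays can look like (an assumption inferred from the signature, not the project's actual implementation):

```cpp
#include <iostream>
#include <vector>

#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Hypothetical sketch: record the length of each nesting level of a JSON
// array, so [[1, 2, 3], [4, 5, 6]] yields the shape {2, 3}.
void parse_json_shape_sketch(const json& j, std::vector<size_t>& shape) {
  if (!j.is_array()) return;  // scalar leaf: recursion stops
  shape.push_back(j.size());
  if (!j.empty()) parse_json_shape_sketch(j[0], shape);
}

int main() {
  std::vector<size_t> shape;
  parse_json_shape_sketch(json::parse("[[1, 2, 3], [4, 5, 6]]"), shape);
  for (size_t d : shape) std::cout << d << ' ';  // prints: 2 3
}
```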
36 changes: 36 additions & 0 deletions include/graph/graph.hpp
@@ -1,8 +1,10 @@
 #pragma once
 #include <algorithm>
+#include <chrono>
 #include <queue>
 #include <stdexcept>
 #include <string>
+#include <thread>
 #include <vector>
 
 #include "layers/Layer.hpp"

@@ -19,6 +21,15 @@ class Graph {
   Tensor* outten_;
   int start_;
   int end_;
+#ifdef ENABLE_STATISTIC_TENSORS
+  std::vector<Tensor> tensors_;
+#endif
+#ifdef ENABLE_STATISTIC_TIME
+  std::vector<int> time_;
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  std::vector<Tensor> weights_;
+#endif
 
  public:
   Graph(int vertices) : BiggestSize_(vertices) {

@@ -90,13 +101,38 @@
       }
     }
     for (int i : traversal) {
+#ifdef ENABLE_STATISTIC_TIME
+      auto start = std::chrono::high_resolution_clock::now();
+#endif
       layers_[i]->run(inten_, *outten_);
+#ifdef ENABLE_STATISTIC_TENSORS
+      tensors_.push_back(inten_);
+      tensors_.push_back(*outten_);
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+      weights_.push_back(layers_[i]->get_weights());
+#endif
       inten_ = *outten_;
+#ifdef ENABLE_STATISTIC_TIME
+      auto end = std::chrono::high_resolution_clock::now();
+      auto elapsed =
+          std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
+      time_.push_back(static_cast<int>(elapsed.count()));
+#endif
     }
   }
   void setOutput(const Layer& lay, Tensor& vec) {
     end_ = lay.getID();
     outten_ = &vec;
   }
+#ifdef ENABLE_STATISTIC_TENSORS
+  std::vector<Tensor> getTensors() { return tensors_; }
+#endif
+#ifdef ENABLE_STATISTIC_TIME
+  std::vector<int> getTime() { return time_; }
+#endif
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  std::vector<Tensor> getWEIGHTS() { return weights_; }
+#endif
 };
 }  // namespace itlab_2023
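Taken together, the new members let callers pull per-layer statistics after a run. A hypothetical usage sketch, assuming a Graph that has already been built and run through its inference path, and a build configured with -DENABLE_STATISTIC_TIME=ON:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

#include "graph/graph.hpp"

// Hypothetical helper: print the per-layer wall-clock times the graph collected.
void report_layer_times(itlab_2023::Graph& graph) {
#ifdef ENABLE_STATISTIC_TIME
  const std::vector<int> times = graph.getTime();
  for (std::size_t i = 0; i < times.size(); ++i) {
    std::cout << "layer " << i << ": " << times[i] << " ms\n";
  }
#endif
}
```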
46 changes: 31 additions & 15 deletions include/layers/ConvLayer.hpp
@@ -13,17 +13,23 @@ class ConvolutionalLayer : public Layer {
   size_t pads_;
   size_t dilations_;
   Tensor kernel_;
+  Tensor bias_;
 
  public:
   ConvolutionalLayer() = default;
   ConvolutionalLayer(size_t step, size_t pads, size_t dilations,
-                     const Tensor& kernel) {
+                     const Tensor& kernel, const Tensor& bias = Tensor()) {
     stride_ = step;
     pads_ = pads;
     dilations_ = dilations;
     kernel_ = kernel;
+    bias_ = bias;
   }
 
   void run(const Tensor& input, Tensor& output) override;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override { return kernel_; }
+#endif
 };
 
 template <typename ValueType>

@@ -36,63 +42,72 @@ class ConvImpl : public LayerImpl<ValueType> {
   size_t pads_;
   size_t dilations_;
   size_t input_size_;
+  std::vector<ValueType> bias_;
 
  public:
   ConvImpl() = delete;
   ConvImpl(size_t stride, size_t pads, size_t dilations, int input_width,
-           int input_height, int input_flow, size_t input_size)
+           int input_height, int input_flow, size_t input_size,
+           const std::vector<ValueType>& bias)
       : input_width_(input_width),
         input_height_(input_height),
         input_flow_(input_flow),
        stride_(stride),
        pads_(pads),
        dilations_(dilations),
-        input_size_(input_size) {}
+        input_size_(input_size),
+        bias_(bias) {}
 
   ConvImpl(const ConvImpl& c) = default;
 
   std::vector<ValueType> run(
       const std::vector<ValueType>& input) const override {
     return input;
   }
 
   std::vector<ValueType> run(std::vector<ValueType> startmatrix, int new_rows,
                              int new_cols, std::vector<ValueType> startkernel,
                              size_t start_kernel_size, size_t kernel_size,
                              int center_distance) const {
     std::vector<ValueType> matrix(new_rows * new_cols * input_flow_, 0);
     for (int i = 0; i < input_height_; ++i) {
       for (int j = 0; j < input_width_; ++j) {
-        matrix[((i + pads_) * new_cols + j + pads_) * input_flow_] =
-            startmatrix[(i * input_width_ + j) * input_flow_];
-        matrix[((i + pads_) * new_cols + j + pads_) * input_flow_ + 1] =
-            startmatrix[(i * input_width_ + j) * input_flow_ + 1];
-        matrix[((i + pads_) * new_cols + j + pads_) * input_flow_ + 2] =
-            startmatrix[(i * input_width_ + j) * input_flow_ + 2];
+        for (int f = 0; f < input_flow_; ++f) {
+          matrix[((i + pads_) * new_cols + j + pads_) * input_flow_ + f] =
+              startmatrix[(i * input_width_ + j) * input_flow_ + f];
+        }
       }
     }
 
     std::vector<ValueType> kernel(kernel_size * kernel_size, 0);
-    for (int i = 0; i < static_cast<int>(start_kernel_size); i++) {
-      for (int j = 0; j < static_cast<int>(start_kernel_size); j++) {
+    for (int i = 0; i < static_cast<int>(start_kernel_size); ++i) {
+      for (int j = 0; j < static_cast<int>(start_kernel_size); ++j) {
         kernel[(dilations_ + i) * static_cast<int>(kernel_size) + j +
                (j + 1) * dilations_] =
             startkernel[i * static_cast<int>(start_kernel_size) + j];
       }
     }
 
     std::vector<ValueType> outputvec;
     for (int i = input_width_ + center_distance;
          i < static_cast<int>(input_size_); i += static_cast<int>(stride_)) {
-      for (int x = 0; x < 3; x++) {
+      for (int x = 0; x < input_flow_; ++x) {
         ValueType color = 0;
         for (int coloms = -input_width_; coloms < input_width_ + 1;
              coloms += input_width_) {
-          for (int str = -1; str < 2; str++) {
+          for (int str = -1; str < 2; ++str) {
             if (input_width_ == 0) {
               throw std::out_of_range("Input = 0");
             }
             auto kercol = static_cast<size_t>(coloms / input_width_ + 1);
             color +=
-                matrix[(i + coloms + str) * 3 + x] *
+                matrix[(i + coloms + str) * input_flow_ + x] *
                 kernel[kercol * kernel_size + static_cast<size_t>(str + 1)];
           }
         }
+        if (!bias_.empty() && static_cast<size_t>(x) < bias_.size()) {
+          color += bias_[x];
+        }
         outputvec.push_back(color);
       }
       if ((i + center_distance + 1) % input_width_ == 0) {

@@ -108,4 +123,5 @@ class ConvImpl : public LayerImpl<ValueType> {
     return outputvec;
   }
 };
-}  // namespace itlab_2023
+
+}  // namespace itlab_2023
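A minimal usage sketch of the extended constructor. Tensor construction here is an assumption: make_tensor is borrowed from its use in EWLayer::get_weights below, and real kernels would carry proper shape information. The defaulted bias argument keeps existing call sites source-compatible.

```cpp
#include <vector>

#include "layers/ConvLayer.hpp"

using namespace itlab_2023;

int main() {
  // Hypothetical 3x3 kernel and per-channel bias; shapes are illustrative.
  std::vector<float> kernel_values(3 * 3, 1.0F);
  std::vector<float> bias_values = {0.1F, 0.2F, 0.3F};
  Tensor kernel = make_tensor(kernel_values);
  Tensor bias = make_tensor(bias_values);

  ConvolutionalLayer with_bias(/*step=*/1, /*pads=*/0, /*dilations=*/0, kernel,
                               bias);
  // bias defaults to an empty Tensor, so the old signature still compiles:
  ConvolutionalLayer without_bias(1, 0, 0, kernel);
  return 0;
}
```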
10 changes: 9 additions & 1 deletion include/layers/EWLayer.hpp
@@ -21,9 +21,16 @@ class EWLayer : public Layer {
   EWLayer() = default;
   EWLayer(std::string function, float alpha = 0.0F, float beta = 0.0F)
       : func_(std::move(function)), alpha_(alpha), beta_(beta) {}
-
   static std::string get_name() { return "Element-wise layer"; }
   void run(const Tensor& input, Tensor& output) override;
+
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override {
+    std::vector<int> v = {0};
+    Tensor a = make_tensor(v);
+    return a;
+  }
+#endif
  private:
   std::string func_;
   float alpha_;

@@ -85,4 +92,5 @@ std::vector<ValueType> EWLayerImpl<ValueType>::run(
   }
   return res;
 }
+
 }  // namespace itlab_2023
5 changes: 3 additions & 2 deletions include/layers/FCLayer.hpp
@@ -20,6 +20,9 @@ class FCLayer : public Layer {
   }
   static std::string get_name() { return "Fully-connected layer"; }
   void run(const Tensor& input, Tensor& output) override;
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override { return weights_; }
+#endif
 };
 
 template <typename ValueType>

@@ -108,9 +111,7 @@ FCLayerImpl<ValueType>::FCLayerImpl(const std::vector<ValueType>& input_weights,
   if (this->inputShape_[0] == 0 || this->outputShape_[0] == 0) {
     throw std::invalid_argument("Invalid weights/bias size for FCLayer");
   }
-  // make weights isize x osize, filling empty with 0s
   weights_.resize(input_weights_shape.count(), ValueType(0));
-  //
 }
 
 template <typename ValueType>