-
Notifications
You must be signed in to change notification settings - Fork 0
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
onnx2keras #3
base: master
Are you sure you want to change the base?
onnx2keras #3
Changes from all commits
51b28aa
c03b035
e97e7db
93ff6e9
ff898c6
cd347f7
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,3 @@ | ||
from .hdf5_parser._hdf5parser import HDF5Parser | ||
from .onnx_parser._onnxparser import ONNXParser | ||
from .pytorch_parser._pytorch_parser import PYTorchParser |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,129 @@ | ||
from nn4mc.parser._parser import Parser | ||
from nn4mc.datastructures import NeuralNetwork | ||
# from ._layerbuilder import * | ||
import h5py | ||
import onnx | ||
import keras | ||
# from onnx2keras import onnx_to_keras | ||
# from nn4mc.parser.onnx_parser.onnx_helpers import HDF5Parser | ||
import numpy as np | ||
from nn4mc.parser.onnx_parser.onnx_helpers import onnx2keras | ||
from nn4mc.parser.hdf5_parser._hdf5parser import HDF5Parser | ||
from tensorflow import keras | ||
|
||
class ONNXParser(Parser):
    """Parser that ingests an ONNX model by first converting it to a
    Keras/HDF5-style model, then reusing the HDF5 parsing pipeline to
    build the layer graph in ``self.nn``.
    """

    def __init__(self, file):
        # Path to the .onnx file to parse.
        self.file = file
        # Layer graph populated by parse().
        self.nn = NeuralNetwork()
        # Model input shape with the batch dimension stripped;
        # set by parse_nn_input().
        self.nn_input_size = None

    def parse(self):
        """Convert the ONNX file to Keras, delegate graph construction to
        the HDF5 parser, then extract the model config and the weights."""
        h5format = onnx2keras(self.file)

        onnx_model = onnx.load(self.file)

        # Reuse the HDF5 parser on the converted model.
        h5parser = HDF5Parser(h5format)
        h5parser.file = self.file
        h5parser.onnx_parse(h5format)
        self.nn = h5parser.nn

        self.parseModelConfig(onnx_model)

        # parse weights and biases
        self.parseWeights(onnx_model)

    def parseModelConfig(self, h5file):
        """Parse the serialized model configuration and append the layers it
        describes to ``self.nn``.

        NOTE(review): parse() passes an ONNX ModelProto here, but this body
        indexes ``h5file['/'].attrs`` like an open h5py file — confirm which
        object type is actually expected.
        """
        configAttr = h5file['/'].attrs['model_config']  # Gets all metadata
        # bytesToJSON, Input and self.builder_map are presumably provided by
        # the Parser base class or module-level imports — not visible here.
        configJSON = bytesToJSON(configAttr)

        self.parse_nn_input(configJSON['config'])

        # This adds an input layer before everything, not sure if it is
        # really necessary.
        # NOTE: Determine if this is necessary
        last_layer = Input('input_1', 'input')
        self.nn.addLayer(last_layer)

        # NOTE: Could check to see if its sequential here
        for model_layer in configJSON['config']['layers']:
            type_ = model_layer['class_name']
            name = model_layer['config']['name']

            if type_ in self.builder_map:
                # NOTE(review): eval() on builder_map values is safe only if
                # builder_map is a trusted, hard-coded table — verify.
                builder = eval(self.builder_map[type_])

                # Build a layer object from metadata
                layer = builder.build_layer(model_layer['config'],
                                            name.lower(), type_.lower())

                self.nn.addLayer(layer)  # Add Layer to neural network
                self.nn.addEdge(last_layer, layer)

                last_layer = layer

    def _parseONNX(self):
        # NOTE(review): broken — self.parse is a bound method and has no
        # 'h5format' attribute, so calling this raises AttributeError.
        return self.parse.h5format

    def parseWeights(self, h5file, _parseONNX=True):
        """Extract kernel/bias (plus the recurrent kernel for GRU cells) for
        every layer in ``self.nn`` and attach them via setParameters().

        :param h5file: model object whose ``graph.initializer`` holds weights.
            NOTE(review): the indexing below (``weightGroup[id][id]``) is
            h5py-style; an ONNX ``graph.initializer`` is a repeated proto
            field — confirm what parse() is really meant to pass here.
        :param _parseONNX: when False, weight extraction and shape
            propagation are skipped entirely.
        """
        weightGroup = h5file.graph.initializer

        if not _parseONNX:
            return

        for layer in self.nn.iterate_layer_list():
            layer_id = layer.identifier  # avoid shadowing builtin id()

            # Skip layer kinds that carry no trainable parameters.
            if layer_id in weightGroup and 'max_pooling1d' not in layer_id \
                    and 'max_pooling2d' not in layer_id \
                    and 'flatten' not in layer_id \
                    and 'input' not in layer_id:

                group = weightGroup[layer_id][layer_id]
                gru_keys = [k for k in group if 'gru_cell' in k]

                if gru_keys:
                    # GRU layers nest their tensors one level deeper.
                    cell = group[gru_keys[0]]
                    weight = np.array(cell['kernel:0'])
                    bias = np.array(cell['bias:0'])
                    rec_weight = np.array(cell['recurrent_kernel:0'][()])
                else:
                    weight = np.array(group['kernel:0'][()])
                    bias = np.array(group['bias:0'][()])
                    rec_weight = None

                layer.setParameters('weight', (layer_id + '_W', weight))
                layer.setParameters('bias', (layer_id + '_b', bias))
                layer.setParameters('weight_rec', (layer_id + '_Wrec', rec_weight))

        # Propagate shapes through the network for downstream code gen.
        input_shape = self.nn_input_size
        for layer in self.nn.iterate_layer_list():
            if 'input' not in layer.identifier:
                input_shape = layer.computeOutShape(input_shape)
                print(layer.getParameters())

    def parse_nn_input(self, model_config: dict):
        """
        INPUT: model_config is the json object dictionary
        OUTPUT: sets self.nn_input_size to the model's input shape with the
                batch dimension stripped
        """
        if model_config.get('build_input_shape'):
            self.nn_input_size = model_config['build_input_shape'][1:]
        # Bug fix: the original called .get('config', 'batch_input_shape'),
        # which merely tested for the 'config' key (the second argument to
        # .get() is a default, not a nested lookup) and then stored the whole
        # config dict. Look up batch_input_shape inside the first layer's
        # config and strip the batch dimension, mirroring the branch above.
        layers = model_config.get('layers') or []
        if layers:
            first_cfg = layers[0].get('config', {})
            if first_cfg.get('batch_input_shape'):
                self.nn_input_size = first_cfg['batch_input_shape'][1:]
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
import onnx | ||
from onnx2keras import onnx_to_keras | ||
|
||
# /*------------------------------------------------- onnx2keras -----
# | Function: onnx2keras
# |
# | Purpose: Convert an ONNX model file into an equivalent Keras model.
# |
# | Parameters: file -- path to a .onnx file
# |
# | Returns: the converted Keras model
# *-------------------------------------------------------------------*/


def onnx2keras(file):
    """Load the ONNX model stored at *file* and convert it to Keras.

    Only the graph's first input name is handed to the converter, so
    multi-input models are effectively truncated to their first input.
    """
    # Load ONNX model
    onnx_model = onnx.load(file)

    # Collect the names of all graph inputs; the converter is seeded
    # with the first one only.
    input_all = [node.name for node in onnx_model.graph.input]

    # Call the converter
    k_model = onnx_to_keras(onnx_model, [input_all[0]])

    return k_model
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,4 @@ | ||
numpy | ||
h5py | ||
onnx2keras | ||
onnx |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,27 @@ | ||
import nn4mc.parser as nnPr | ||
import nn4mc.datastructures as nnDs | ||
import nn4mc.generator as nnGn | ||
import unittest | ||
import os | ||
|
||
class TestTranslator(unittest.TestCase):
    """End-to-end smoke test: parse an ONNX model and generate code from it."""

    def setUp(self):
        pass

    def test_file(self):
        # NOTE(review): this relative path depends on the CWD the tests run
        # from; consider anchoring it to __file__ like the output dir below.
        p = nnPr.ONNXParser('../data/resnet18-v2-7.onnx')

        p.parse()

        # Generate into <this file's dir>/output, creating it if needed.
        # exist_ok avoids the exists()/makedirs() check-then-act race.
        here = os.path.dirname(os.path.abspath(__file__))
        output_dir = os.path.join(here, 'output')
        os.makedirs(output_dir, exist_ok=True)

        generator = nnGn.Generator(p.nn)

        generator.generate(output_dir)


if __name__ == '__main__':
    unittest.main()
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
TODO(sarahaguasvivas): Use `test_translator` to verify that nn4mc does not crash with the models exercised here: https://github.com/correlllab/nn4mc_py/blob/master/tests/test_translator/test_full.py
Create a new test file called `test_full_onnx.py` following that template.