diff --git a/.gitignore b/.gitignore
index 72364f9..b372fc7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+.vscode/
+check_autopep8
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/.travis.yml b/.travis.yml
index b05f8f2..1b4af15 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,8 +5,8 @@ python:
   - "2.7"
   - "3.6"
 env:
-  - CHAINER_VERSION="==3.1.0" ONNX_VERSION="==0.2.1"
-  - CHAINER_VERSION="==4.0.0b1" ONNX_VERSION="==0.2.1"
+  - CHAINER_VERSION="==3.1.0" ONNX_VERSION="git+git://github.com/onnx/onnx.git@a8bd1facd321ea52f0a6f88600138606711b7a96"
+  - CHAINER_VERSION="==4.0.0b1" ONNX_VERSION="git+git://github.com/onnx/onnx.git@a8bd1facd321ea52f0a6f88600138606711b7a96"
 
 notifications:
   email: false
@@ -23,7 +23,7 @@ install:
   - conda info -a
   - conda install protobuf
   - hash -r
-  - CFLAGS="-I$HOME/miniconda/include" pip install "onnx$ONNX_VERSION"
+  - CFLAGS="-I$HOME/miniconda/include" pip install $ONNX_VERSION
   - pip install "chainer$CHAINER_VERSION"
   - pip install -e .
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 0000000..04cf360
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,19 @@
+## Installation
+
+### On Ubuntu 16.04
+
+```bash
+conda install -c conda-forge protobuf numpy pybind11
+pip install git+git://github.com/onnx/onnx.git@a8bd1facd321ea52f0a6f88600138606711b7a96
+pip install onnx-chainer
+```
+
+### On macOS
+
+```bash
+brew install protobuf
+pip install pybind11
+pip install protobuf
+pip install git+git://github.com/onnx/onnx.git@a8bd1facd321ea52f0a6f88600138606711b7a96
+pip install onnx-chainer
+```
\ No newline at end of file
diff --git a/README.md b/README.md
index 146a16d..d7ea29a 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,7 @@ This is an add-on package for ONNX support by Chainer.
 
 ## Installation
 
-```bash
-pip install onnx-chainer
-```
+See [INSTALL.md](INSTALL.md)
 
 ## Quick Start
 
@@ -30,3 +28,55 @@ x = np.zeros((1, 3, 224, 224), dtype=np.float32)
 
 onnx_chainer.export(model, x, filename='VGG16.onnx')
 ```
+
+## Supported Functions
+
+### Activation
+
+- ELU
+- HardSigmoid
+- LeakyReLU
+- LogSoftmax
+- PReLUFunction
+- ReLU
+- Sigmoid
+- Softmax
+- Softplus
+- Tanh
+
+### Array
+
+- Cast
+- Concat
+- Depth2Space
+- Pad
+- Reshape
+- Space2Depth
+- SplitAxis
+- Squeeze
+- Tile
+- Transpose
+
+### Connection
+
+- Convolution2DFunction
+- LinearFunction
+
+### Pooling
+
+- AveragePooling2D
+- MaxPooling2D
+
+### Normalization
+
+- BatchNormalization
+- FixedBatchNormalization
+
+### Math
+
+- Add
+- Sub
+- Mul
+- Neg
+- Absolute
+- Div
\ No newline at end of file
diff --git a/check_autopep8 b/check_autopep8
deleted file mode 100644
index fa29f9a..0000000
--- a/check_autopep8
+++ /dev/null
@@ -1,11 +0,0 @@
---- original/./onnx_chainer/export.py
-+++ fixed/./onnx_chainer/export.py
-@@ -53,7 +53,7 @@
-         array = parameter.array
-     elif isinstance(parameter, numpy.ndarray):
-         array = parameter
--        a=1*1
-+        a = 1 * 1
-     return numpy_helper.from_array(array, param_names[id(parameter)])
-
-
diff --git a/onnx_chainer/__init__.py b/onnx_chainer/__init__.py
index 0d3b028..6df47c6 100644
--- a/onnx_chainer/__init__.py
+++ b/onnx_chainer/__init__.py
@@ -1 +1,2 @@
+from .export import convert_parameter  # NOQA
 from .export import export  # NOQA
diff --git a/onnx_chainer/export.py b/onnx_chainer/export.py
index 6d60539..625fc49 100644
--- a/onnx_chainer/export.py
+++ b/onnx_chainer/export.py
@@ -1,42 +1,24 @@
 from __future__ import print_function
 
 import heapq
-import os
 
 import chainer
 from chainer import function_node
 from chainer import variable
 import numpy
 
+from onnx_chainer import functions
+from onnx_chainer import mapping
+
 try:
     from onnx import checker
     from onnx import helper
-    from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
     from onnx import numpy_helper
 
     _available = True
 
-    _dtype = {v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items()}
-
-    _layers = {
-        'LinearFunction': 'Gemm',
-        'Reshape': 'Reshape',
-        'Convolution2DFunction': 'Conv',
-        'AveragePooling2D': 'AveragePool',
-        'MaxPooling2D': 'MaxPool',
-        'BatchNormalization': 'BatchNormalization',
-        'ReLU': 'Relu',
-        'Softmax': 'Softmax',
-        'Add': 'Add',
-        'Sub': 'Sub',
-        'Mul': 'Mul',
-        'Neg': 'Neg',
-        'Absolute': 'Abs',
-        'Div': 'Div',
-    }
-
-except (ImportError, TypeError) as e:
-    print(e)
+
+except (ImportError, TypeError):
     _available = False
@@ -55,238 +37,20 @@ def convert_parameter(parameter, param_names):
         array = parameter.array
     elif isinstance(parameter, numpy.ndarray):
         array = parameter
+    if array.shape == ():
+        array = array[None]
     return numpy_helper.from_array(array, param_names[id(parameter)])
 
 
-def convert_convolution_2d_function(link, input_names, param_names):
-    input_names[input_names.index(id(link.W))] = param_names[id(link.W)]
-    if hasattr(link, 'b'):
-        input_names[input_names.index(id(link.b))] = param_names[id(link.b)]
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[link.__class__.__name__]
-    out_names = [str(id(out())) for out in link.outputs]
-
-    return helper.make_node(
-        layer_name, input_names, out_names,
-        kernel_shape=link.W.shape[2:],
-        strides=(link.sy, link.sx),
-        pads=(link.ph, link.pw)
-    ),
-
-
-def convert_linear_function(link, input_names, param_names):
-    W = convert_parameter(link.W, param_names)
-    input_names[input_names.index(id(link.W))] = W.name
-    if hasattr(link, 'b'):
-        b = convert_parameter(link.b, param_names)
-        input_names[input_names.index(id(link.b))] = b.name
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[link.__class__.__name__]
-    out_names = [str(id(out())) for out in link.outputs]
-
-    return helper.make_node(
-        layer_name, input_names, out_names,
-        alpha=1.,
-        beta=1.,
-        broadcast=True,
-        transA=False,
-        transB=False,
-    ),
-
-
-def convert_reshape(func, input_names, param_names):
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[func.__class__.__name__]
-    out_names = [str(id(out())) for out in func.outputs]
-
-    return helper.make_node(
-        layer_name, input_names, out_names,
-        shape=func.shape
-    ),
-
-
-def convert_average_pooling_2d(func, input_names, param_names):
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[func.__class__.__name__]
-    gpool = func.inputs[0].shape[2:] == (func.kh, func.kw)
-    out_names = [str(id(out())) for out in func.outputs]
-
-    if not gpool:
-        return helper.make_node(
-            layer_name, input_names, out_names,
-            kernel_shape=(func.kh, func.kw),
-            pads=(func.ph, func.pw),
-            strides=(func.sy, func.sx)
-        ),
-    else:
-        return helper.make_node(
-            'Global' + layer_name, input_names, out_names),
-
-
-def convert_max_pooling_2d(func, input_names, param_names):
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[func.__class__.__name__]
-    gpool = func.inputs[0].shape[2:] == (func.kh, func.kw)
-    out_names = [str(id(out())) for out in func.outputs]
-
-    if not gpool:
-        return helper.make_node(
-            layer_name, input_names, out_names,
-            kernel_shape=(func.kh, func.kw),
-            pads=(func.ph, func.pw),
-            strides=(func.sy, func.sx)
-        ),
-    else:
-        return helper.make_node(
-            'Global' + layer_name, input_names, out_names),
-
-
-def convert_batch_normalization(link, input_names, param_names):
-    gamma_idx = input_names.index(id(link.gamma))
-    input_names[gamma_idx] = param_names[id(link.gamma)]
-    beta_idx = input_names.index(id(link.beta))
-    input_names[beta_idx] = param_names[id(link.beta)]
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-    input_names.append(param_names[id(link.running_mean)])
-    input_names.append(param_names[id(link.running_var)])
-
-    layer_name = _layers[link.__class__.__name__]
-    unique_layer_name = os.path.dirname(input_names[1])
-    out_names = [str(id(out())) for out in link.outputs]
-    if chainer.config.train:
-        out_names += [
-            os.path.join(unique_layer_name, 'mean'),
-            os.path.join(unique_layer_name, 'var'),
-            os.path.join(unique_layer_name, 'saved_mean'),
-            os.path.join(unique_layer_name, 'saved_var')
-        ]
-
-    return helper.make_node(
-        layer_name, input_names, out_names,
-        epsilon=link.eps,
-        is_test=not chainer.config.train,
-        momentum=link.decay,
-        spatial=True,
-        consumed_inputs=[False, False, False, True, True],
-    ),
-
-
-def convert_relu(func, input_names, param_names):
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[func.__class__.__name__]
-    out_names = [str(id(out())) for out in func.outputs]
-    return helper.make_node(layer_name, input_names, out_names),
-
-
-def convert_softmax(func, input_names, param_names):
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[func.__class__.__name__]
-    out_names = [str(id(out())) for out in func.outputs]
-
-    return helper.make_node(
-        layer_name, input_names, out_names,
-        axis=func.axis
-    ),
-
-
-def convert_nonparametric_function(func, input_names, param_names):
-    for i, input_name in enumerate(input_names):
-        if type(input_name) is not str:
-            input_names[i] = str(input_name)
-
-    layer_name = _layers[func.__class__.__name__]
-    out_names = [str(id(out())) for out in func.outputs]
-
-    return helper.make_node(layer_name, input_names, out_names),
-
-
 def create_node(func_name, cand, input_names, param_names, parameters,
                 input_tensors):
-    if func_name == 'Convolution2DFunction':
-        nodes = convert_convolution_2d_function(cand, input_names, param_names)
-    elif func_name == 'LinearFunction':
-        nodes = convert_linear_function(cand, input_names, param_names)
-    elif func_name == 'Reshape':
-        nodes = convert_reshape(cand, input_names, param_names)
-    elif func_name == 'AveragePooling2D':
-        nodes = convert_average_pooling_2d(cand, input_names, param_names)
-    elif func_name == 'MaxPooling2D':
-        nodes = convert_max_pooling_2d(cand, input_names, param_names)
-    elif func_name == 'BatchNormalization':
-        layer_name = os.path.dirname(param_names[id(cand.gamma)])
-
-        # Add running_mean and running_var to graph
-        param_names[id(cand.running_mean)] = os.path.join(
-            layer_name, 'running_mean')
-        parameters.append(
-            numpy_helper.from_array(
-                cand.running_mean,
-                param_names[id(cand.running_mean)]))
-        input_tensors.append(
-            helper.make_tensor_value_info(
-                param_names[id(cand.running_mean)],
-                _dtype[cand.running_mean.dtype],
-                cand.running_mean.shape)
-        )
-
-        param_names[id(cand.running_var)] = os.path.join(
-            layer_name, 'running_var')
-        parameters.append(
-            numpy_helper.from_array(
-                cand.running_var,
-                param_names[id(cand.running_var)]))
-        input_tensors.append(
-            helper.make_tensor_value_info(
-                param_names[id(cand.running_var)],
-                _dtype[cand.running_var.dtype],
-                cand.running_var.shape)
-        )
-
-        nodes = convert_batch_normalization(cand, input_names, param_names)
-    elif func_name == 'ReLU':
-        nodes = convert_relu(cand, input_names, param_names)
-    elif func_name == 'Softmax':
-        nodes = convert_softmax(cand, input_names, param_names)
-    elif func_name == 'Add':
-        nodes = convert_nonparametric_function(cand, input_names, param_names)
-    elif func_name == 'Sub':
-        nodes = convert_nonparametric_function(cand, input_names, param_names)
-    elif func_name == 'Mul':
-        nodes = convert_nonparametric_function(cand, input_names, param_names)
-    elif func_name == 'Neg':
-        nodes = convert_nonparametric_function(cand, input_names, param_names)
-    elif func_name == 'Div':
-        nodes = convert_nonparametric_function(cand, input_names, param_names)
-    elif func_name == 'Absolute':
-        nodes = convert_nonparametric_function(cand, input_names, param_names)
+    converter_name = 'convert_{}'.format(func_name)
+    if hasattr(functions, converter_name):
+        converter = getattr(functions, converter_name)
+        nodes = converter(
+            cand, input_names, param_names, parameters, input_tensors)
     else:
         raise ValueError('{} is not supported.'.format(func_name))
-
-    # A single Chainer layer could be multiple onnx layers
-    # e.g., Convolution2D -> Conv + Add (for bias)
     for node in nodes:
        checker.check_node(node)
     return nodes
@@ -352,8 +116,9 @@ def export(model, args, filename=None, export_params=True,
             param_names[id(param)] = name
             parameters.append(
                 convert_parameter(param, param_names))
+            param_shape = (1,) if param.shape == () else param.shape
             input_tensors.append(helper.make_tensor_value_info(
-                name, _dtype[param.array.dtype], param.shape))
+                name, mapping.dtypes[param.array.dtype], param_shape))
 
     if isinstance(outputs, dict):
         outputs = list(outputs.values())
@@ -385,9 +150,9 @@ def add_cand(cand):
                 seen_edges.add((creator, cand))
                 nodes.add(creator)
                 nodes.add(cand)
+
         elif isinstance(cand, function_node.FunctionNode):
             func_name = cand.__class__.__name__
-
             input_names = []
             for input_ in cand.inputs:
                 if input_ is not cand and (input_, cand) not in seen_edges:
@@ -396,7 +161,7 @@ def add_cand(cand):
                     nodes.add(input_)
                     nodes.add(cand)
 
-                # If it's a parameter
+                # When input_ is a parameter
                 if input_.name is not None:
                     input_names.append(id(input_.get_variable()))
                     setattr(cand, input_.name, input_.get_variable())
@@ -414,20 +179,20 @@ def add_cand(cand):
             if id(out_var) in output_tensor_ids:
                 idx = output_tensor_ids.index(id(out_var))
                 output_tensor_ids[idx] = (
-                    str(id(out_)), _dtype[out_var.array.dtype],
+                    str(id(out_)), mapping.dtypes[out_var.array.dtype],
                     out_var.shape)
 
-        if func_name in _layers.keys():
+        if func_name in mapping.operators.keys():
             onnx_nodes = create_node(
                 func_name, cand, input_names, param_names,
                 parameters, input_tensors)
             graph.extend(onnx_nodes)
 
-    # Add all the input values for the network to input_tensors
+    # Add all the input values for the network to input_tensors
     for i, arg in enumerate(args):
         name = str(id(arg))
         input_tensors.append(helper.make_tensor_value_info(
-            name, _dtype[arg.array.dtype], arg.shape))
+            name, mapping.dtypes[arg.array.dtype], arg.shape))
 
     output_tensors = []
     for out_ in output_tensor_ids:
diff --git a/onnx_chainer/functions/__init__.py b/onnx_chainer/functions/__init__.py
new file mode 100644
index 0000000..7c5390c
--- /dev/null
+++ b/onnx_chainer/functions/__init__.py
@@ -0,0 +1,30 @@
+from onnx_chainer.functions.activation.elu import convert_ELU  # NOQA
+from onnx_chainer.functions.activation.hard_sigmoid import convert_HardSigmoid  # NOQA
+from onnx_chainer.functions.activation.leaky_relu import convert_LeakyReLU  # NOQA
+from onnx_chainer.functions.activation.log_softmax import convert_LogSoftmax  # NOQA
+from onnx_chainer.functions.activation.prelu import convert_PReLUFunction  # NOQA
+from onnx_chainer.functions.activation.relu import convert_ReLU  # NOQA
+from onnx_chainer.functions.activation.sigmoid import convert_Sigmoid  # NOQA
+from onnx_chainer.functions.activation.softmax import convert_Softmax  # NOQA
+from onnx_chainer.functions.activation.softplus import convert_Softplus  # NOQA
+from onnx_chainer.functions.activation.tanh import convert_Tanh  # NOQA
+
+from onnx_chainer.functions.array.cast import convert_Cast  # NOQA
+from onnx_chainer.functions.array.concat import convert_Concat  # NOQA
+from onnx_chainer.functions.array.depth2space import convert_Depth2Space  # NOQA
+from onnx_chainer.functions.array.pad import convert_Pad  # NOQA
+from onnx_chainer.functions.array.reshape import convert_Reshape  # NOQA
+from onnx_chainer.functions.array.space2depth import convert_Space2Depth  # NOQA
+from onnx_chainer.functions.array.split_axis import convert_SplitAxis  # NOQA
+from onnx_chainer.functions.array.squeeze import convert_Squeeze  # NOQA
+from onnx_chainer.functions.array.tile import convert_Tile  # NOQA
+from onnx_chainer.functions.array.transpose import convert_Transpose  # NOQA
+
+from onnx_chainer.functions.connection.convolution_2d import convert_Convolution2DFunction  # NOQA
+from onnx_chainer.functions.connection.linear import convert_LinearFunction  # NOQA
+
+from onnx_chainer.functions.normalization.batch_normalization import convert_BatchNormalization  # NOQA
+from onnx_chainer.functions.normalization.batch_normalization import convert_FixedBatchNormalization  # NOQA
+
+from onnx_chainer.functions.pooling.average_pooling_2d import convert_AveragePooling2D  # NOQA
+from onnx_chainer.functions.pooling.max_pooling_2d import convert_MaxPooling2D  # NOQA
diff --git a/onnx_chainer/functions/activation/__init__.py b/onnx_chainer/functions/activation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/activation/elu.py b/onnx_chainer/functions/activation/elu.py
new file mode 100644
index 0000000..6cd5090
--- /dev/null
+++ b/onnx_chainer/functions/activation/elu.py
@@ -0,0 +1,17 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_ELU(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        alpha=func.alpha
+    ),
diff --git a/onnx_chainer/functions/activation/hard_sigmoid.py b/onnx_chainer/functions/activation/hard_sigmoid.py
new file mode 100644
index 0000000..e652ade
--- /dev/null
+++ b/onnx_chainer/functions/activation/hard_sigmoid.py
@@ -0,0 +1,18 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_HardSigmoid(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        alpha=0.2,
+        beta=0.5
+    ),
diff --git a/onnx_chainer/functions/activation/leaky_relu.py b/onnx_chainer/functions/activation/leaky_relu.py
new file mode 100644
index 0000000..e83ff2b
--- /dev/null
+++ b/onnx_chainer/functions/activation/leaky_relu.py
@@ -0,0 +1,17 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_LeakyReLU(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        alpha=func.slope
+    ),
diff --git a/onnx_chainer/functions/activation/log_softmax.py b/onnx_chainer/functions/activation/log_softmax.py
new file mode 100644
index 0000000..fee66a5
--- /dev/null
+++ b/onnx_chainer/functions/activation/log_softmax.py
@@ -0,0 +1,17 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_LogSoftmax(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        axis=1
+    ),
diff --git a/onnx_chainer/functions/activation/prelu.py b/onnx_chainer/functions/activation/prelu.py
new file mode 100644
index 0000000..cb881fc
--- /dev/null
+++ b/onnx_chainer/functions/activation/prelu.py
@@ -0,0 +1,14 @@
+from onnx import helper
+from onnx_chainer import mapping
+
+
+def convert_PReLUFunction(
+        func, input_names, param_names, parameters, input_tensors):
+    input_names[input_names.index(id(func.W))] = param_names[id(func.W)]
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/activation/relu.py b/onnx_chainer/functions/activation/relu.py
new file mode 100644
index 0000000..e3528e6
--- /dev/null
+++ b/onnx_chainer/functions/activation/relu.py
@@ -0,0 +1,14 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_ReLU(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/activation/sigmoid.py b/onnx_chainer/functions/activation/sigmoid.py
new file mode 100644
index 0000000..6123ba0
--- /dev/null
+++ b/onnx_chainer/functions/activation/sigmoid.py
@@ -0,0 +1,14 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Sigmoid(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/activation/softmax.py b/onnx_chainer/functions/activation/softmax.py
new file mode 100644
index 0000000..e26222a
--- /dev/null
+++ b/onnx_chainer/functions/activation/softmax.py
@@ -0,0 +1,17 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Softmax(func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        axis=func.axis
+    ),
diff --git a/onnx_chainer/functions/activation/softplus.py b/onnx_chainer/functions/activation/softplus.py
new file mode 100644
index 0000000..a1c8cc4
--- /dev/null
+++ b/onnx_chainer/functions/activation/softplus.py
@@ -0,0 +1,14 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Softplus(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/activation/tanh.py b/onnx_chainer/functions/activation/tanh.py
new file mode 100644
index 0000000..0f8a48f
--- /dev/null
+++ b/onnx_chainer/functions/activation/tanh.py
@@ -0,0 +1,14 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Tanh(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+    return helper.make_node(layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/array/__init__.py b/onnx_chainer/functions/array/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/array/cast.py b/onnx_chainer/functions/array/cast.py
new file mode 100644
index 0000000..2ebd3df
--- /dev/null
+++ b/onnx_chainer/functions/array/cast.py
@@ -0,0 +1,20 @@
+import numpy as np
+
+from onnx import helper
+from onnx_chainer import mapping
+
+
+def convert_Cast(func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    typ = func.type if isinstance(func.type, np.dtype) else np.dtype(func.type)
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        to=mapping.dtypes[typ]
+    ),
diff --git a/onnx_chainer/functions/array/concat.py b/onnx_chainer/functions/array/concat.py
new file mode 100644
index 0000000..a1488f4
--- /dev/null
+++ b/onnx_chainer/functions/array/concat.py
@@ -0,0 +1,17 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Concat(func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        axis=func.axis
+    ),
diff --git a/onnx_chainer/functions/array/depth2space.py b/onnx_chainer/functions/array/depth2space.py
new file mode 100644
index 0000000..351d600
--- /dev/null
+++ b/onnx_chainer/functions/array/depth2space.py
@@ -0,0 +1,18 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Depth2Space(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        blocksize=func.r
+    ),
diff --git a/onnx_chainer/functions/array/pad.py b/onnx_chainer/functions/array/pad.py
new file mode 100644
index 0000000..655e99d
--- /dev/null
+++ b/onnx_chainer/functions/array/pad.py
@@ -0,0 +1,41 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Pad(func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    if func.mode not in ['constant', 'reflect', 'edge']:
+        raise ValueError(
+            '{} mode is not supported in ONNX\'s Pad operation'.format(
+                func.mode))
+
+    if 'constant_values' in func.keywords:
+        values = func.keywords['constant_values']
+        if not isinstance(values, int) and len(values) > 1:
+            raise ValueError(
+                'ONNX doesn\'t support multiple constant values for Pad '
+                'operation')
+        elif not isinstance(values, int):
+            values = values[0]
+
+        node = helper.make_node(
+            layer_name, input_names, out_names,
+            mode=func.mode,
+            pads=func.pad_bw.tolist(),
+            value=values
+        )
+    else:
+        node = helper.make_node(
+            layer_name, input_names, out_names,
+            mode=func.mode,
+            pads=func.pad_bw.tolist(),
+        )
+
+    return node,
diff --git a/onnx_chainer/functions/array/reshape.py b/onnx_chainer/functions/array/reshape.py
new file mode 100644
index 0000000..6718fdf
--- /dev/null
+++ b/onnx_chainer/functions/array/reshape.py
@@ -0,0 +1,17 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Reshape(func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        shape=func.shape
+    ),
diff --git a/onnx_chainer/functions/array/space2depth.py b/onnx_chainer/functions/array/space2depth.py
new file mode 100644
index 0000000..a10a7fa
--- /dev/null
+++ b/onnx_chainer/functions/array/space2depth.py
@@ -0,0 +1,18 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Space2Depth(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        blocksize=func.r
+    ),
diff --git a/onnx_chainer/functions/array/split_axis.py b/onnx_chainer/functions/array/split_axis.py
new file mode 100644
index 0000000..26532e4
--- /dev/null
+++ b/onnx_chainer/functions/array/split_axis.py
@@ -0,0 +1,30 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_SplitAxis(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    if hasattr(func.indices_or_sections, '__iter__'):
+        split = []
+        prev_i = 0
+        for i in func.indices_or_sections:
+            split.append(i - prev_i)
+            prev_i = i
+        # Append the last section so the split lengths cover the whole axis
+        split.append(func.inputs[0].shape[func.axis] - prev_i)
+    else:
+        length = func.inputs[0].shape[func.axis] // func.indices_or_sections
+        split = [length for _ in range(func.indices_or_sections)]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        axis=func.axis,
+        split=split
+    ),
diff --git a/onnx_chainer/functions/array/squeeze.py b/onnx_chainer/functions/array/squeeze.py
new file mode 100644
index 0000000..c64f676
--- /dev/null
+++ b/onnx_chainer/functions/array/squeeze.py
@@ -0,0 +1,26 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Squeeze(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    if func.axis is None:
+        axis = []
+        for i, s in enumerate(func.inputs[0].shape):
+            if s == 1:
+                axis.append(i)
+    else:
+        axis = func.axis
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        axes=axis
+    ),
diff --git a/onnx_chainer/functions/array/tile.py b/onnx_chainer/functions/array/tile.py
new file mode 100644
index 0000000..9835460
--- /dev/null
+++ b/onnx_chainer/functions/array/tile.py
@@ -0,0 +1,60 @@
+import os
+
+import numpy as np
+
+from onnx import helper
+from onnx import numpy_helper
+from onnx_chainer import mapping
+
+
+def convert_Tile(func, input_names, param_names, parameters, input_tensors):
+
+    # Add tiles and axis to graph
+    if isinstance(func.reps, int):
+        func.reps = [func.reps]
+    tiles = np.asarray(func.reps, dtype=np.float32)
+    axis = np.array([i for i, _ in enumerate(func.reps)], dtype=np.float32)
+    layer_name = 'tile_{}'.format(str(id(tiles)))
+
+    param_names[id(tiles)] = os.path.join(layer_name, 'tiles')
+    parameters.append(
+        numpy_helper.from_array(
+            tiles,
+            param_names[id(tiles)]
+        )
+    )
+    input_tensors.append(
+        helper.make_tensor_value_info(
+            param_names[id(tiles)],
+            mapping.dtypes[tiles.dtype],
+            tiles.shape
+        )
+    )
+    input_names.append(param_names[id(tiles)])
+
+    param_names[id(axis)] = os.path.join(layer_name, 'axis')
+    parameters.append(
+        numpy_helper.from_array(
+            axis,
+            param_names[id(axis)]
+        )
+    )
+    input_tensors.append(
+        helper.make_tensor_value_info(
+            param_names[id(axis)],
+            mapping.dtypes[axis.dtype],
+            axis.shape
+        )
+    )
+    input_names.append(param_names[id(axis)])
+
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    axis = [i for i, _ in enumerate(func.reps)]
+
+    return helper.make_node(layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/array/transpose.py b/onnx_chainer/functions/array/transpose.py
new file mode 100644
index 0000000..dbec681
--- /dev/null
+++ b/onnx_chainer/functions/array/transpose.py
@@ -0,0 +1,23 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Transpose(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    if func.axes is None:
+        node = helper.make_node(layer_name, input_names, out_names)
+    else:
+        node = helper.make_node(
+            layer_name, input_names, out_names,
+            perm=func.axes
+        )
+
+    return node,
diff --git a/onnx_chainer/functions/connection/__init__.py b/onnx_chainer/functions/connection/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/connection/convolution_2d.py b/onnx_chainer/functions/connection/convolution_2d.py
new file mode 100644
index 0000000..7977367
--- /dev/null
+++ b/onnx_chainer/functions/connection/convolution_2d.py
@@ -0,0 +1,23 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_Convolution2DFunction(
+        func, input_names, param_names, parameters, input_tensors):
+    input_names[input_names.index(id(func.W))] = param_names[id(func.W)]
+    if hasattr(func, 'b'):
+        input_names[input_names.index(id(func.b))] = param_names[id(func.b)]
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        kernel_shape=func.W.shape[2:],
+        strides=(func.sy, func.sx),
+        pads=(func.ph, func.pw)
+    ),
diff --git a/onnx_chainer/functions/connection/deconvolution_2d.py b/onnx_chainer/functions/connection/deconvolution_2d.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/connection/dilated_convolution_2d.py b/onnx_chainer/functions/connection/dilated_convolution_2d.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/connection/embed_id.py b/onnx_chainer/functions/connection/embed_id.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/connection/linear.py b/onnx_chainer/functions/connection/linear.py
new file mode 100644
index 0000000..da95e3b
--- /dev/null
+++ b/onnx_chainer/functions/connection/linear.py
@@ -0,0 +1,45 @@
+import os
+
+import numpy as np
+
+from onnx import helper
+from onnx import numpy_helper
+from onnx_chainer import mapping
+
+
+def convert_LinearFunction(
+        func, input_names, param_names, parameters, input_tensors):
+    input_names[input_names.index(id(func.W))] = param_names[id(func.W)]
+    if hasattr(func, 'b'):
+        input_names[input_names.index(id(func.b))] = param_names[id(func.b)]
+    else:
+        # If nobias=True, create zero vector and add it to parameters
+        layer_name = os.path.dirname(param_names[id(func.W)])
+        bias = np.zeros(func.W.shape[0], dtype=func.W.array.dtype)
+        param_names[id(bias)] = os.path.join(layer_name, 'b')
+        parameters.append(
+            numpy_helper.from_array(
+                bias,
+                param_names[id(bias)]
+            )
+        )
+        input_tensors.append(
+            helper.make_tensor_value_info(
+                param_names[id(bias)],
+                mapping.dtypes[bias.dtype],
+                bias.shape
+            )
+        )
+        input_names.append(param_names[id(bias)])
+
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        axis=1,
+        axis_w=1
+    ),
diff --git a/onnx_chainer/functions/connection/n_step_gru.py b/onnx_chainer/functions/connection/n_step_gru.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/connection/n_step_lstm.py b/onnx_chainer/functions/connection/n_step_lstm.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/connection/n_step_rnn.py b/onnx_chainer/functions/connection/n_step_rnn.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/basic_math.py b/onnx_chainer/functions/math/basic_math.py
new file mode 100644
index 0000000..60d2622
--- /dev/null
+++ b/onnx_chainer/functions/math/basic_math.py
@@ -0,0 +1,59 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_unary_operator(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(layer_name, input_names, out_names),
+
+
+def convert_binary_operator(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(layer_name, input_names, out_names),
+
+
+def convert_Add(func, input_names, param_names, parameters, input_tensors):
+    return convert_binary_operator(
+        func, input_names, param_names, parameters, input_tensors)
+
+
+def convert_Sub(func, input_names, param_names, parameters, input_tensors):
+    return convert_binary_operator(
+        func, input_names, param_names, parameters, input_tensors)
+
+
+def convert_Mul(func, input_names, param_names, parameters, input_tensors):
+    return convert_binary_operator(
+        func, input_names, param_names, parameters, input_tensors)
+
+
+def convert_Neg(func, input_names, param_names, parameters, input_tensors):
+    # Neg takes a single input, so use the unary converter
+    return convert_unary_operator(
+        func, input_names, param_names, parameters, input_tensors)
+
+
+def convert_Div(func, input_names, param_names, parameters, input_tensors):
+    return convert_binary_operator(
+        func, input_names, param_names, parameters, input_tensors)
+
+
+def convert_Absolute(
+        func, input_names, param_names, parameters, input_tensors):
+    return convert_unary_operator(
+        func, input_names, param_names, parameters, input_tensors)
diff --git a/onnx_chainer/functions/math/clip.py b/onnx_chainer/functions/math/clip.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/exponential.py b/onnx_chainer/functions/math/exponential.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/matmul.py b/onnx_chainer/functions/math/matmul.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/maximum.py b/onnx_chainer/functions/math/maximum.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/minimum.py b/onnx_chainer/functions/math/minimum.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/minmax.py b/onnx_chainer/functions/math/minmax.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/sqrt.py b/onnx_chainer/functions/math/sqrt.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/squared_difference.py b/onnx_chainer/functions/math/squared_difference.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/math/sum.py b/onnx_chainer/functions/math/sum.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/noise/__init__.py b/onnx_chainer/functions/noise/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/noise/dropout.py b/onnx_chainer/functions/noise/dropout.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/normalization/__init__.py b/onnx_chainer/functions/normalization/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/normalization/batch_normalization.py b/onnx_chainer/functions/normalization/batch_normalization.py
new file mode 100644
index 0000000..4c696bf
--- /dev/null
+++ b/onnx_chainer/functions/normalization/batch_normalization.py
@@ -0,0 +1,137 @@
+import os
+
+import chainer
+from onnx import helper
+from onnx import numpy_helper
+
+from onnx_chainer import mapping
+
+
+def convert_BatchNormalization(
+        func, input_names, param_names, parameters, input_tensors):
+
+    layer_name = os.path.dirname(param_names[id(func.gamma)])
+
+    # Add running_mean and running_var to graph
+    param_names[id(func.running_mean)] = os.path.join(
+        layer_name, 'running_mean')
+    parameters.append(
+        numpy_helper.from_array(
+            func.running_mean,
+            param_names[id(func.running_mean)]))
+    input_tensors.append(
+        helper.make_tensor_value_info(
+            param_names[id(func.running_mean)],
+            mapping.dtypes[func.running_mean.dtype],
+            func.running_mean.shape)
+    )
+
+    param_names[id(func.running_var)] = os.path.join(
+        layer_name, 'running_var')
+    parameters.append(
+        numpy_helper.from_array(
+            func.running_var,
+            param_names[id(func.running_var)]))
+    input_tensors.append(
+        helper.make_tensor_value_info(
+            param_names[id(func.running_var)],
+            mapping.dtypes[func.running_var.dtype],
+            func.running_var.shape)
+    )
+
+    gamma_idx = input_names.index(id(func.gamma))
+    input_names[gamma_idx] = param_names[id(func.gamma)]
+    beta_idx = input_names.index(id(func.beta))
+    input_names[beta_idx] = param_names[id(func.beta)]
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+    input_names.append(param_names[id(func.running_mean)])
+    input_names.append(param_names[id(func.running_var)])
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    unique_layer_name = os.path.dirname(input_names[1])
+    out_names = [str(id(out())) for out in func.outputs]
+    out_names += [
+        os.path.join(unique_layer_name, 'mean'),
+        os.path.join(unique_layer_name, 'var'),
+        os.path.join(unique_layer_name, 'saved_mean'),
+        os.path.join(unique_layer_name, 'saved_var')
+    ]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        epsilon=func.eps,
+        is_test=not chainer.config.train,
+        momentum=func.decay,
+        spatial=True,
+        consumed_inputs=[False, False, False, True, True],
+    ),
+
+
+def convert_FixedBatchNormalization(
+        func, input_names, param_names, parameters, input_tensors):
+
+    layer_name = os.path.dirname(param_names[id(func.gamma)])
+
+    # Add avg_mean and avg_var to graph
+    mean_id, var_id = input_names[3:]
+    mean_arr, var_arr = [i.get_variable().array for i in func.inputs[3:]]
+
+    param_names[mean_id] = os.path.join(layer_name, 'mean')
+    parameters.append(
+        numpy_helper.from_array(
+            mean_arr,
+            param_names[mean_id]
+        )
+    )
+    input_tensors.append(
+        helper.make_tensor_value_info(
+            param_names[mean_id],
+            mapping.dtypes[mean_arr.dtype],
+            mean_arr.shape
+        )
+    )
+
+    param_names[var_id] = os.path.join(layer_name, 'var')
+    parameters.append(
+        numpy_helper.from_array(
+            var_arr,
+            param_names[var_id]
+        )
+    )
+    input_tensors.append(
+        helper.make_tensor_value_info(
+            param_names[var_id],
+            mapping.dtypes[var_arr.dtype],
+            var_arr.shape
+        )
+    )
+
+    gamma_idx = input_names.index(id(func.gamma))
+    input_names[gamma_idx] = param_names[id(func.gamma)]
+
+    beta_idx = input_names.index(id(func.beta))
+    input_names[beta_idx] = param_names[id(func.beta)]
+
+    mean_idx = input_names.index(mean_id)
+    input_names[mean_idx] = param_names[mean_id]
+
+    var_idx = input_names.index(var_id)
+    input_names[var_idx] = param_names[var_id]
+
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    out_names = [str(id(out())) for out in func.outputs]
+
+    return helper.make_node(
+        layer_name, input_names, out_names,
+        epsilon=func.eps,
+        is_test=not chainer.config.train,
+        momentum=0.9,
+        spatial=True,
+        consumed_inputs=[False, False, False, True, True],
+    ),
diff --git a/onnx_chainer/functions/normalization/local_response_normalization.py b/onnx_chainer/functions/normalization/local_response_normalization.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/pooling/__init__.py b/onnx_chainer/functions/pooling/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/pooling/average_pooling_2d.py b/onnx_chainer/functions/pooling/average_pooling_2d.py
new file mode 100644
index 0000000..7b39134
--- /dev/null
+++ b/onnx_chainer/functions/pooling/average_pooling_2d.py
@@ -0,0 +1,25 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_AveragePooling2D(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    gpool = func.inputs[0].shape[2:] == (func.kh, func.kw)
+    out_names = [str(id(out())) for out in func.outputs]
+
+    if not gpool:
+        return helper.make_node(
+            layer_name, input_names, out_names,
+            kernel_shape=(func.kh, func.kw),
+            pads=(func.ph, func.pw),
+            strides=(func.sy, func.sx)
+        ),
+    else:
+        return helper.make_node(
+            'Global' + layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/pooling/average_pooling_nd.py b/onnx_chainer/functions/pooling/average_pooling_nd.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/pooling/max_pooling_2d.py b/onnx_chainer/functions/pooling/max_pooling_2d.py
new file mode 100644
index 0000000..5ebb424
--- /dev/null
+++ b/onnx_chainer/functions/pooling/max_pooling_2d.py
@@ -0,0 +1,25 @@
+from onnx import helper
+
+from onnx_chainer import mapping
+
+
+def convert_MaxPooling2D(
+        func, input_names, param_names, parameters, input_tensors):
+    for i, input_name in enumerate(input_names):
+        if type(input_name) is not str:
+            input_names[i] = str(input_name)
+
+    layer_name = mapping.operators[func.__class__.__name__]
+    gpool = func.inputs[0].shape[2:] == (func.kh, func.kw)
+    out_names = [str(id(out())) for out in func.outputs]
+
+    if not gpool:
+        return helper.make_node(
+            layer_name, input_names, out_names,
+            kernel_shape=(func.kh, func.kw),
+            pads=(func.ph, func.pw),
+            strides=(func.sy, func.sx)
+        ),
+    else:
+        return helper.make_node(
+            'Global' + layer_name, input_names, out_names),
diff --git a/onnx_chainer/functions/pooling/max_pooling_nd.py b/onnx_chainer/functions/pooling/max_pooling_nd.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/pooling/unpooling_2d.py b/onnx_chainer/functions/pooling/unpooling_2d.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/functions/pooling/unpooling_nd.py b/onnx_chainer/functions/pooling/unpooling_nd.py
new file mode 100644
index 0000000..e69de29
diff --git a/onnx_chainer/mapping.py b/onnx_chainer/mapping.py
new file mode 100644
index 0000000..c552c6e
--- /dev/null
+++ b/onnx_chainer/mapping.py
@@ -0,0 +1,49 @@
+from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
+
+dtypes = {v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items()}
+
+operators = {
+    # Activation
+    'ELU': 'Elu',
+    'HardSigmoid': 'HardSigmoid',
+    'LeakyReLU': 'LeakyRelu',
+    'LogSoftmax': 'LogSoftmax',
+    'PReLUFunction': 'PRelu',
+    'ReLU': 'Relu',
+    'Sigmoid': 'Sigmoid',
+    'Softmax': 'Softmax',
+    'Softplus': 'Softplus',
+    'Tanh': 'Tanh',
+
+    # Array
+    'Cast': 'Cast',
+    'Concat': 'Concat',
+    'Depth2Space': 'DepthToSpace',
+    'Pad': 'Pad',
+    'Reshape': 'Reshape',
+    'Space2Depth': 'SpaceToDepth',
+    'SplitAxis': 'Split',
+    'Squeeze': 'Squeeze',
+    'Tile': 'Tile',
+    'Transpose': 'Transpose',
+
+    # Connection
+    'Convolution2DFunction': 'Conv',
+    'LinearFunction': 'FC',
+
+    # Pooling
+    'AveragePooling2D': 'AveragePool',
+    'MaxPooling2D': 'MaxPool',
+
+    # Normalization
+    'BatchNormalization': 'BatchNormalization',
+    'FixedBatchNormalization': 'BatchNormalization',
+
+    # Math
+    'Add': 'Add',
+    'Sub': 'Sub',
+    'Mul': 'Mul',
+    'Neg': 'Neg',
+    'Absolute': 'Abs',
+    'Div': 'Div',
+}
diff --git a/setup.py b/setup.py
index 3f92616..9356cdf 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
 setup(
     name='onnx-chainer',
     packages=['onnx_chainer'],
-    version='0.2.1a2',
+    version='0.2.1b1',
     description='ONNX support for Chainer',
     author='Shunta Saito',
     author_email='shunta@preferred.jp',
@@ -13,6 +13,6 @@
         'chainer>=3.1.0',
         'onnx==0.2.1'
     ],
-    tests_require=['chainer>=2.0.0', 'onnx>=0.2.1', 'numpy'],
+    tests_require=['chainer>=3.1.0', 'onnx==0.2.1', 'numpy'],
     license='MIT',
 )
diff --git a/tests/functions_tests/activation_tests/test_elu.py b/tests/functions_tests/activation_tests/test_elu.py
new file mode 100644
index 0000000..7b92b18
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_elu.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.elu(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_hard_sigmoid.py b/tests/functions_tests/activation_tests/test_hard_sigmoid.py
new file mode 100644
index 0000000..e280d98
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_hard_sigmoid.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.hard_sigmoid(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_leaky_relu.py b/tests/functions_tests/activation_tests/test_leaky_relu.py
new file mode 100644
index 0000000..47db02f
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_leaky_relu.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.leaky_relu(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_log_softmax.py b/tests/functions_tests/activation_tests/test_log_softmax.py
new file mode 100644
index 0000000..9def545
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_log_softmax.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.log_softmax(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_prelu.py b/tests/functions_tests/activation_tests/test_prelu.py
new file mode 100644
index 0000000..9a87c83
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_prelu.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5, nobias=True)
+            self.prelu = L.PReLU()
+
+    def __call__(self, x):
+        return self.prelu(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_relu.py b/tests/functions_tests/activation_tests/test_relu.py
new file mode 100644
index 0000000..de4ecb1
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_relu.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.relu(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_sigmoid.py b/tests/functions_tests/activation_tests/test_sigmoid.py
new file mode 100644
index 0000000..3b62f5d
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_sigmoid.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.sigmoid(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_softmax.py b/tests/functions_tests/activation_tests/test_softmax.py
new file mode 100644
index 0000000..fdee6e9
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_softmax.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.softmax(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_softplus.py b/tests/functions_tests/activation_tests/test_softplus.py
new file mode 100644
index 0000000..7465d8b
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_softplus.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.softplus(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/activation_tests/test_tanh.py b/tests/functions_tests/activation_tests/test_tanh.py
new file mode 100644
index 0000000..511ed4d
--- /dev/null
+++ b/tests/functions_tests/activation_tests/test_tanh.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.tanh(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/array_tests/test_cast.py b/tests/functions_tests/array_tests/test_cast.py
new file mode 100644
index 0000000..d0b054c
--- /dev/null
+++ b/tests/functions_tests/array_tests/test_cast.py
@@ -0,0 +1,33 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        return F.cast(self.l1(x), np.float32)
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/array_tests/test_concat.py b/tests/functions_tests/array_tests/test_concat.py
new file mode 100644
index 0000000..152a9a8
--- /dev/null
+++ b/tests/functions_tests/array_tests/test_concat.py
@@ -0,0 +1,34 @@
+import unittest
+
+import chainer
+import chainer.functions as F
+import chainer.links as L
+import numpy as np
+import onnx_chainer
+
+
+class Model(chainer.Chain):
+
+    def __init__(self):
+        super(Model, self).__init__()
+        with self.init_scope():
+            self.l1 = L.Linear(5, 5)
+
+    def __call__(self, x):
+        h = self.l1(x)
+        return F.concat((h, h))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/functions_tests/array_tests/test_depth2space.py b/tests/functions_tests/array_tests/test_depth2space.py
new file mode 100644
index 0000000..beeb332
--- /dev/null
+++ b/tests/functions_tests/array_tests/test_depth2space.py
b/tests/functions_tests/array_tests/test_depth2space.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Convolution2D(None, 12, 3, 1, 1) + + def __call__(self, x): + return F.depth2space(self.l1(x), 2) + + +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model() + self.x = np.zeros((1, 3, 6, 6), dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_pad.py b/tests/functions_tests/array_tests/test_pad.py new file mode 100644 index 0000000..908afe8 --- /dev/null +++ b/tests/functions_tests/array_tests/test_pad.py @@ -0,0 +1,41 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +from chainer import testing +import numpy as np + +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self, mode): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Linear(5, 5) + self.mode = mode + + def __call__(self, x): + return F.pad(self.l1(x), (0, 2), self.mode) + + +@testing.parameterize( + {'mode': 'constant'}, + {'mode': 'reflect'}, + {'mode': 'edge'}, +) +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model(self.mode) + self.x = np.zeros((1, 5), dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_reshape.py b/tests/functions_tests/array_tests/test_reshape.py new file mode 100644 index 0000000..b16fd78 --- /dev/null +++ b/tests/functions_tests/array_tests/test_reshape.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Linear(5, 5) + + def __call__(self, x): + return F.reshape(self.l1(x), (1, 5, 1, 1)) + + +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model() + self.x = np.zeros((1, 5), dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_space2depth.py b/tests/functions_tests/array_tests/test_space2depth.py new file mode 100644 index 0000000..e868e96 --- /dev/null +++ b/tests/functions_tests/array_tests/test_space2depth.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Convolution2D(None, 5, 3, 1, 1) + + def __call__(self, x): + return F.space2depth(self.l1(x), 2) + + +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model() + self.x = np.zeros((1, 3, 6, 6), 
dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_split_axis.py b/tests/functions_tests/array_tests/test_split_axis.py new file mode 100644 index 0000000..57577a3 --- /dev/null +++ b/tests/functions_tests/array_tests/test_split_axis.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Linear(5, 6) + + def __call__(self, x): + return F.split_axis(self.l1(x), 2, 1) + + +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model() + self.x = np.zeros((1, 5), dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_squeeze.py b/tests/functions_tests/array_tests/test_squeeze.py new file mode 100644 index 0000000..858e73a --- /dev/null +++ b/tests/functions_tests/array_tests/test_squeeze.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Linear(5, 5) + + def __call__(self, x): + return F.squeeze(F.reshape(self.l1(x), (1, 5, 1, 1))) + + +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model() + self.x = np.zeros((1, 5), dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_tile.py b/tests/functions_tests/array_tests/test_tile.py new file mode 100644 index 0000000..b2ac21b --- /dev/null +++ b/tests/functions_tests/array_tests/test_tile.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = L.Linear(5, 5) + + def __call__(self, x): + return F.tile(self.l1(x), (1, 2)) + + +class TestExport(unittest.TestCase): + + def setUp(self): + self.model = Model() + self.x = np.zeros((1, 5), dtype=np.float32) + + def test_export_test(self): + chainer.config.train = False + onnx_chainer.export(self.model, self.x) + + def test_export_train(self): + chainer.config.train = True + onnx_chainer.export(self.model, self.x) diff --git a/tests/functions_tests/array_tests/test_transpose.py b/tests/functions_tests/array_tests/test_transpose.py new file mode 100644 index 0000000..8d55459 --- /dev/null +++ b/tests/functions_tests/array_tests/test_transpose.py @@ -0,0 +1,33 @@ +import unittest + +import chainer +import chainer.functions as F +import chainer.links as L +import numpy as np +import onnx_chainer + + +class Model(chainer.Chain): + + def __init__(self): + super(Model, self).__init__() + with self.init_scope(): + self.l1 = 
+
+    def __call__(self, x):
+        return F.transpose(self.l1(x))
+
+
+class TestExport(unittest.TestCase):
+
+    def setUp(self):
+        self.model = Model()
+        self.x = np.zeros((1, 5), dtype=np.float32)
+
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
diff --git a/tests/test_export.py b/tests/test_export.py
index c825f9c..8a36099 100644
--- a/tests/test_export.py
+++ b/tests/test_export.py
@@ -7,10 +7,10 @@
 import onnx_chainer
 
 
-class MLP(chainer.Chain):
+class Model(chainer.Chain):
 
     def __init__(self, n_units, n_out):
-        super(MLP, self).__init__()
+        super(Model, self).__init__()
         with self.init_scope():
             self.l1 = L.Convolution2D(None, n_units, 3, 1, 1)
             self.b1 = L.BatchNormalization(n_units)
@@ -24,9 +24,13 @@ def __call__(self, x):
 class TestExport(unittest.TestCase):
 
     def setUp(self):
-        self.model = MLP(3, 5)
+        self.model = Model(3, 5)
         self.x = np.zeros((1, 3, 5, 5), dtype=np.float32)
 
-    def test_export(self):
-        model = onnx_chainer.export(self.model, self.x)
-        print(model)
+    def test_export_test(self):
+        chainer.config.train = False
+        onnx_chainer.export(self.model, self.x)
+
+    def test_export_train(self):
+        chainer.config.train = True
+        onnx_chainer.export(self.model, self.x)
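
The tests above only assert that `onnx_chainer.export` runs to completion under both settings of `chainer.config.train`; they do not inspect the serialized graph. A minimal sketch of a stricter check, assuming the `onnx` package from INSTALL.md is installed; the `model.onnx` filename and the small `Model` chain here are illustrative, not part of this patch:

```python
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
import onnx
import onnx_chainer


class Model(chainer.Chain):

    def __init__(self):
        super(Model, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(5, 5)

    def __call__(self, x):
        return F.relu(self.l1(x))


model = Model()
x = np.zeros((1, 5), dtype=np.float32)

# Export under test-mode configuration, writing the graph to disk
# (the `filename` argument follows the Quick Start in README.md).
chainer.config.train = False
onnx_chainer.export(model, x, filename='model.onnx')

# Re-load the serialized graph and validate it with the ONNX checker,
# which catches malformed nodes that a crash-free export can hide.
onnx_model = onnx.load('model.onnx')
onnx.checker.check_model(onnx_model)
```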