From 4eb3763e61fa19581ca6d21ebb8a837bf8293430 Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Thu, 28 May 2020 00:18:06 +0800 Subject: [PATCH 01/24] Implement Squeezenet using Squeezenet1.1 --- examples/onnx/squeezenet.py | 117 ++++++++++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 examples/onnx/squeezenet.py diff --git a/examples/onnx/squeezenet.py b/examples/onnx/squeezenet.py new file mode 100644 index 0000000000..92f9372c8c --- /dev/null +++ b/examples/onnx/squeezenet.py @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under th + +import os +import numpy as np +from PIL import Image + +from singa import device +from singa import tensor +from singa import autograd +from singa import sonnx +import onnx +from utils import download_model, update_batch_size, check_exist_or_download + +import logging +logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') + + +def preprocess(img): + img = img.resize((224, 224)) + img = img.crop((0, 0, 224, 224)) + img = np.array(img).astype(np.float32) / 255. 
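+    # move the channel axis to the front (HWC -> CHW), normalize each channel
+    # with the ImageNet mean/std expected by the pretrained model, then add a
+    # batch dimension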
+ img = np.rollaxis(img, 2, 0) + for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]): + img[channel, :, :] -= mean + img[channel, :, :] /= std + img = np.expand_dims(img, axis=0) + return img + +def get_image_labe(): + # download label + label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' + with open(check_exist_or_download(label_url), 'r') as f: + labels = [l.rstrip() for l in f] + + # download image + image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' + img = Image.open(check_exist_or_download(image_url)) + return img, labels + +class Infer: + + def __init__(self, sg_ir): + self.sg_ir = sg_ir + for idx, tens in sg_ir.tensor_map.items(): + # allow the tensors to be updated + tens.requires_grad = True + tens.stores_grad = True + sg_ir.tensor_map[idx] = tens + + def forward(self, x): + return sg_ir.run([x])[0] + + +if __name__ == "__main__": + + url = 'https://github.com/onnx/models/raw/master/vision/classification/squeezenet/model/squeezenet1.1-7.tar.gz' + download_dir = '/tmp/' + model_path = os.path.join(download_dir, 'squeezenet1.1', 'squeezenet1.1.onnx') + + logging.info("onnx load model...") + download_model(url) + onnx_model = onnx.load(model_path) + + # set batch size + onnx_model = update_batch_size(onnx_model, 1) + + # prepare the model + logging.info("prepare model...") + dev = device.get_default_device() + sg_ir = sonnx.prepare(onnx_model, device=dev) + autograd.training = False + model = Infer(sg_ir) + + # verify the test + from utils import load_dataset + inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'squeezenet1.1', 'test_data_set_0')) + x_batch = tensor.Tensor(device=dev, data=inputs[0]) + outputs = model.forward(x_batch) + for ref_o, o in zip(ref_outputs, outputs): + np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) + + # inference + logging.info("preprocessing...") + img, labels = get_image_labe() + img = preprocess(img) + + logging.info("model running...") + x_batch = tensor.Tensor(device=dev, data=img) + y = model.forward(x_batch) + + logging.info("postprocessing...") + y = tensor.softmax(y) + scores = tensor.to_numpy(y) + scores = np.squeeze(scores) + a = np.argsort(scores)[::-1] + # The output needs to be [1,1000,1,1] + #a = np.expand_dims(a, axis=0) + #print(a.shape) + for i in a[0:5]: + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) \ No newline at end of file From 5588eeb573658eddef72ca3e494aeba2a87fc853 Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Thu, 28 May 2020 00:38:16 +0800 Subject: [PATCH 02/24] Modify the comment regarding dimension --- examples/onnx/squeezenet.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/onnx/squeezenet.py b/examples/onnx/squeezenet.py index 92f9372c8c..2bdbec8e6b 100644 --- a/examples/onnx/squeezenet.py +++ b/examples/onnx/squeezenet.py @@ -110,8 +110,8 @@ def forward(self, x): scores = tensor.to_numpy(y) scores = np.squeeze(scores) a = np.argsort(scores)[::-1] - # The output needs to be [1,1000,1,1] - #a = np.expand_dims(a, axis=0) - #print(a.shape) + # The guide stated the output needs to be [1,1000,1,1], but here it is [1000,1,1] + # The dimension can be expanded if [1,1000,1,1] is desired + # a = np.expand_dims(a, axis=0) for i in a[0:5]: logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) \ No newline at end of file From 1efaf4b3033aeb29bef413936f91c01788059bd1 Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Thu, 28 May 2020 00:48:35 +0800 Subject: [PATCH 03/24] 
Change device from CPU to GPU --- examples/onnx/squeezenet.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/onnx/squeezenet.py b/examples/onnx/squeezenet.py index 2bdbec8e6b..0246e503c9 100644 --- a/examples/onnx/squeezenet.py +++ b/examples/onnx/squeezenet.py @@ -43,7 +43,7 @@ def preprocess(img): img = np.expand_dims(img, axis=0) return img -def get_image_labe(): +def get_image_label(): # download label label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' with open(check_exist_or_download(label_url), 'r') as f: @@ -83,7 +83,7 @@ def forward(self, x): # prepare the model logging.info("prepare model...") - dev = device.get_default_device() + dev = device.create_cuda_gpu() sg_ir = sonnx.prepare(onnx_model, device=dev) autograd.training = False model = Infer(sg_ir) @@ -98,7 +98,7 @@ def forward(self, x): # inference logging.info("preprocessing...") - img, labels = get_image_labe() + img, labels = get_image_label() img = preprocess(img) logging.info("model running...") From a1cc797d647fb5e4513482ce8dce0b1f088238e8 Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Sat, 30 May 2020 00:39:43 +0800 Subject: [PATCH 04/24] Formating squeezenet --- examples/onnx/shufflenet.py | 114 ++++++++++++++++++++++++++++++++++ examples/onnx/shufflenet2.py | 116 +++++++++++++++++++++++++++++++++++ examples/onnx/squeezenet.py | 13 ++-- 3 files changed, 237 insertions(+), 6 deletions(-) create mode 100644 examples/onnx/shufflenet.py create mode 100644 examples/onnx/shufflenet2.py diff --git a/examples/onnx/shufflenet.py b/examples/onnx/shufflenet.py new file mode 100644 index 0000000000..c8dc24cc9b --- /dev/null +++ b/examples/onnx/shufflenet.py @@ -0,0 +1,114 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under th + +import os +import numpy as np +from PIL import Image + +from singa import device +from singa import tensor +from singa import autograd +from singa import sonnx +import onnx +from utils import download_model, update_batch_size, check_exist_or_download + +import logging +logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') + + +def preprocess(img): + img = img.resize((256, 256)) + img = img.crop((16, 16, 240, 240)) + img = np.array(img).astype(np.float32) / 255. 
+ img = np.rollaxis(img, 2, 0) + for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]): + img[channel, :, :] -= mean + img[channel, :, :] /= std + img = np.expand_dims(img, axis=0) + return img + +def get_image_labe(): + # download label + label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' + with open(check_exist_or_download(label_url), 'r') as f: + labels = [l.rstrip() for l in f] + + # download image + image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' + img = Image.open(check_exist_or_download(image_url)) + return img, labels + +class Infer: + + def __init__(self, sg_ir): + self.sg_ir = sg_ir + for idx, tens in sg_ir.tensor_map.items(): + # allow the tensors to be updated + tens.requires_grad = True + tens.stores_grad = True + sg_ir.tensor_map[idx] = tens + + def forward(self, x): + return sg_ir.run([x])[0] + + +if __name__ == "__main__": + + url = 'https://s3.amazonaws.com/download.onnx/models/opset_9/shufflenet.tar.gz' + download_dir = '/tmp/' + model_path = os.path.join(download_dir, 'shufflenet', 'model.onnx') + + logging.info("onnx load model...") + download_model(url) + onnx_model = onnx.load(model_path) + + # set batch size + onnx_model = update_batch_size(onnx_model, 1) + + # prepare the model + logging.info("prepare model...") + dev = device.get_default_device() + sg_ir = sonnx.prepare(onnx_model, device=dev) + autograd.training = False + model = Infer(sg_ir) + + # verifty the test + # from utils import load_dataset + # inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'resnet18v1', 'test_data_set_0')) + # x_batch = tensor.Tensor(device=dev, data=inputs[0]) + # outputs = model.forward(x_batch) + # for ref_o, o in zip(ref_outputs, outputs): + # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) + + # inference + logging.info("preprocessing...") + img, labels = get_image_labe() + img = preprocess(img) + + logging.info("model running...") + x_batch = tensor.Tensor(device=dev, data=img) + y = model.forward(x_batch) + + logging.info("postprocessing...") + y = tensor.softmax(y) + scores = tensor.to_numpy(y) + scores = np.squeeze(scores) + a = np.argsort(scores)[::-1] + for i in a[0:5]: + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) \ No newline at end of file diff --git a/examples/onnx/shufflenet2.py b/examples/onnx/shufflenet2.py new file mode 100644 index 0000000000..8b76ad22d1 --- /dev/null +++ b/examples/onnx/shufflenet2.py @@ -0,0 +1,116 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under th + +import os +import numpy as np +from PIL import Image + +from singa import device +from singa import tensor +from singa import autograd +from singa import sonnx +import onnx +from utils import download_model, update_batch_size, check_exist_or_download + +import logging +logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') + + +def preprocess(img): + img = img.resize((256, 256)) + img = img.crop((16, 16, 240, 240)) + img = np.array(img).astype(np.float32) / 255. + img = np.rollaxis(img, 2, 0) + for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]): + img[channel, :, :] -= mean + img[channel, :, :] /= std + img = np.expand_dims(img, axis=0) + return img + +def get_image_labe(): + # download label + label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' + with open(check_exist_or_download(label_url), 'r') as f: + labels = [l.rstrip() for l in f] + + # download image + image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' + img = Image.open(check_exist_or_download(image_url)) + return img, labels + +class Infer: + + def __init__(self, sg_ir): + self.sg_ir = sg_ir + for idx, tens in sg_ir.tensor_map.items(): + # allow the tensors to be updated + tens.requires_grad = True + tens.stores_grad = True + sg_ir.tensor_map[idx] = tens + + def forward(self, x): + return sg_ir.run([x])[0] + + +if __name__ == "__main__": + + url = 'https://github.com/onnx/models/raw/master/vision/classification/shufflenet/model/shufflenet-v2-10.tar.gz' + download_dir = '/tmp/' + model_path = os.path.join(download_dir, 'model', 'test_shufflenetv2', 'model.onnx') + + logging.info("onnx load model...") + download_model(url) + ''' + onnx_model = onnx.load(model_path) + + # set batch size + onnx_model = update_batch_size(onnx_model, 1) + + # prepare the model + logging.info("prepare model...") + dev = device.get_default_device() + sg_ir = sonnx.prepare(onnx_model, device=dev) + autograd.training = False + model = Infer(sg_ir) + + # verifty the test + # from utils import load_dataset + # inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'resnet18v1', 'test_data_set_0')) + # x_batch = tensor.Tensor(device=dev, data=inputs[0]) + # outputs = model.forward(x_batch) + # for ref_o, o in zip(ref_outputs, outputs): + # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) + + # inference + logging.info("preprocessing...") + img, labels = get_image_labe() + img = preprocess(img) + + logging.info("model running...") + x_batch = tensor.Tensor(device=dev, data=img) + y = model.forward(x_batch) + + logging.info("postprocessing...") + y = tensor.softmax(y) + scores = tensor.to_numpy(y) + scores = np.squeeze(scores) + a = np.argsort(scores)[::-1] + for i in a[0:5]: + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) + ''' \ No newline at end of file diff --git a/examples/onnx/squeezenet.py b/examples/onnx/squeezenet.py index 0246e503c9..6b3601eeae 100644 --- a/examples/onnx/squeezenet.py +++ b/examples/onnx/squeezenet.py @@ -43,6 +43,7 @@ def preprocess(img): img = np.expand_dims(img, axis=0) return img + def get_image_label(): # download label label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' @@ -54,6 +55,7 @@ def get_image_label(): img = Image.open(check_exist_or_download(image_url)) return img, labels + class Infer: def __init__(self, sg_ir): @@ -72,7 +74,8 @@ def forward(self, x): url = 
'https://github.com/onnx/models/raw/master/vision/classification/squeezenet/model/squeezenet1.1-7.tar.gz' download_dir = '/tmp/' - model_path = os.path.join(download_dir, 'squeezenet1.1', 'squeezenet1.1.onnx') + model_path = os.path.join(download_dir, 'squeezenet1.1', + 'squeezenet1.1.onnx') logging.info("onnx load model...") download_model(url) @@ -90,7 +93,8 @@ def forward(self, x): # verify the test from utils import load_dataset - inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'squeezenet1.1', 'test_data_set_0')) + inputs, ref_outputs = load_dataset( + os.path.join('/tmp', 'squeezenet1.1', 'test_data_set_0')) x_batch = tensor.Tensor(device=dev, data=inputs[0]) outputs = model.forward(x_batch) for ref_o, o in zip(ref_outputs, outputs): @@ -110,8 +114,5 @@ def forward(self, x): scores = tensor.to_numpy(y) scores = np.squeeze(scores) a = np.argsort(scores)[::-1] - # The guide stated the output needs to be [1,1000,1,1], but here it is [1000,1,1] - # The dimension can be expanded if [1,1000,1,1] is desired - # a = np.expand_dims(a, axis=0) for i in a[0:5]: - logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) \ No newline at end of file + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) From 8f7b4ef976746e8b9d9a3ceaaad0f51d1bfa771c Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Sat, 30 May 2020 00:43:49 +0800 Subject: [PATCH 05/24] Remove shufflenet files --- examples/onnx/shufflenet.py | 114 ---------------------------------- examples/onnx/shufflenet2.py | 116 ----------------------------------- 2 files changed, 230 deletions(-) delete mode 100644 examples/onnx/shufflenet.py delete mode 100644 examples/onnx/shufflenet2.py diff --git a/examples/onnx/shufflenet.py b/examples/onnx/shufflenet.py deleted file mode 100644 index c8dc24cc9b..0000000000 --- a/examples/onnx/shufflenet.py +++ /dev/null @@ -1,114 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under th - -import os -import numpy as np -from PIL import Image - -from singa import device -from singa import tensor -from singa import autograd -from singa import sonnx -import onnx -from utils import download_model, update_batch_size, check_exist_or_download - -import logging -logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') - - -def preprocess(img): - img = img.resize((256, 256)) - img = img.crop((16, 16, 240, 240)) - img = np.array(img).astype(np.float32) / 255. 
- img = np.rollaxis(img, 2, 0) - for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], - [0.229, 0.224, 0.225]): - img[channel, :, :] -= mean - img[channel, :, :] /= std - img = np.expand_dims(img, axis=0) - return img - -def get_image_labe(): - # download label - label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' - with open(check_exist_or_download(label_url), 'r') as f: - labels = [l.rstrip() for l in f] - - # download image - image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' - img = Image.open(check_exist_or_download(image_url)) - return img, labels - -class Infer: - - def __init__(self, sg_ir): - self.sg_ir = sg_ir - for idx, tens in sg_ir.tensor_map.items(): - # allow the tensors to be updated - tens.requires_grad = True - tens.stores_grad = True - sg_ir.tensor_map[idx] = tens - - def forward(self, x): - return sg_ir.run([x])[0] - - -if __name__ == "__main__": - - url = 'https://s3.amazonaws.com/download.onnx/models/opset_9/shufflenet.tar.gz' - download_dir = '/tmp/' - model_path = os.path.join(download_dir, 'shufflenet', 'model.onnx') - - logging.info("onnx load model...") - download_model(url) - onnx_model = onnx.load(model_path) - - # set batch size - onnx_model = update_batch_size(onnx_model, 1) - - # prepare the model - logging.info("prepare model...") - dev = device.get_default_device() - sg_ir = sonnx.prepare(onnx_model, device=dev) - autograd.training = False - model = Infer(sg_ir) - - # verifty the test - # from utils import load_dataset - # inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'resnet18v1', 'test_data_set_0')) - # x_batch = tensor.Tensor(device=dev, data=inputs[0]) - # outputs = model.forward(x_batch) - # for ref_o, o in zip(ref_outputs, outputs): - # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) - - # inference - logging.info("preprocessing...") - img, labels = get_image_labe() - img = preprocess(img) - - logging.info("model running...") - x_batch = tensor.Tensor(device=dev, data=img) - y = model.forward(x_batch) - - logging.info("postprocessing...") - y = tensor.softmax(y) - scores = tensor.to_numpy(y) - scores = np.squeeze(scores) - a = np.argsort(scores)[::-1] - for i in a[0:5]: - logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) \ No newline at end of file diff --git a/examples/onnx/shufflenet2.py b/examples/onnx/shufflenet2.py deleted file mode 100644 index 8b76ad22d1..0000000000 --- a/examples/onnx/shufflenet2.py +++ /dev/null @@ -1,116 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under th - -import os -import numpy as np -from PIL import Image - -from singa import device -from singa import tensor -from singa import autograd -from singa import sonnx -import onnx -from utils import download_model, update_batch_size, check_exist_or_download - -import logging -logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') - - -def preprocess(img): - img = img.resize((256, 256)) - img = img.crop((16, 16, 240, 240)) - img = np.array(img).astype(np.float32) / 255. - img = np.rollaxis(img, 2, 0) - for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], - [0.229, 0.224, 0.225]): - img[channel, :, :] -= mean - img[channel, :, :] /= std - img = np.expand_dims(img, axis=0) - return img - -def get_image_labe(): - # download label - label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' - with open(check_exist_or_download(label_url), 'r') as f: - labels = [l.rstrip() for l in f] - - # download image - image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' - img = Image.open(check_exist_or_download(image_url)) - return img, labels - -class Infer: - - def __init__(self, sg_ir): - self.sg_ir = sg_ir - for idx, tens in sg_ir.tensor_map.items(): - # allow the tensors to be updated - tens.requires_grad = True - tens.stores_grad = True - sg_ir.tensor_map[idx] = tens - - def forward(self, x): - return sg_ir.run([x])[0] - - -if __name__ == "__main__": - - url = 'https://github.com/onnx/models/raw/master/vision/classification/shufflenet/model/shufflenet-v2-10.tar.gz' - download_dir = '/tmp/' - model_path = os.path.join(download_dir, 'model', 'test_shufflenetv2', 'model.onnx') - - logging.info("onnx load model...") - download_model(url) - ''' - onnx_model = onnx.load(model_path) - - # set batch size - onnx_model = update_batch_size(onnx_model, 1) - - # prepare the model - logging.info("prepare model...") - dev = device.get_default_device() - sg_ir = sonnx.prepare(onnx_model, device=dev) - autograd.training = False - model = Infer(sg_ir) - - # verifty the test - # from utils import load_dataset - # inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'resnet18v1', 'test_data_set_0')) - # x_batch = tensor.Tensor(device=dev, data=inputs[0]) - # outputs = model.forward(x_batch) - # for ref_o, o in zip(ref_outputs, outputs): - # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) - - # inference - logging.info("preprocessing...") - img, labels = get_image_labe() - img = preprocess(img) - - logging.info("model running...") - x_batch = tensor.Tensor(device=dev, data=img) - y = model.forward(x_batch) - - logging.info("postprocessing...") - y = tensor.softmax(y) - scores = tensor.to_numpy(y) - scores = np.squeeze(scores) - a = np.argsort(scores)[::-1] - for i in a[0:5]: - logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) - ''' \ No newline at end of file From 1f88218fce4f83d597920ad2ec714ffe680e1e4e Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Sat, 30 May 2020 14:38:50 +0800 Subject: [PATCH 06/24] Comment out testing part --- examples/onnx/squeezenet.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/onnx/squeezenet.py b/examples/onnx/squeezenet.py index 6b3601eeae..8a6ecf5d0f 100644 --- a/examples/onnx/squeezenet.py +++ b/examples/onnx/squeezenet.py @@ -92,13 +92,13 @@ def forward(self, x): model = Infer(sg_ir) # verify the test - from utils import load_dataset - inputs, ref_outputs = 
load_dataset( - os.path.join('/tmp', 'squeezenet1.1', 'test_data_set_0')) - x_batch = tensor.Tensor(device=dev, data=inputs[0]) - outputs = model.forward(x_batch) - for ref_o, o in zip(ref_outputs, outputs): - np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) + # from utils import load_dataset + # inputs, ref_outputs = load_dataset( + # os.path.join('/tmp', 'squeezenet1.1', 'test_data_set_0')) + # x_batch = tensor.Tensor(device=dev, data=inputs[0]) + # outputs = model.forward(x_batch) + # for ref_o, o in zip(ref_outputs, outputs): + # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) # inference logging.info("preprocessing...") From 9c65c9949337e4f811b28b0306f62baabf1f6e60 Mon Sep 17 00:00:00 2001 From: Alvinnyk Date: Mon, 1 Jun 2020 10:26:54 +0800 Subject: [PATCH 07/24] Implement vgg19 model from onnx model zoo --- examples/onnx/vgg19.py | 116 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 examples/onnx/vgg19.py diff --git a/examples/onnx/vgg19.py b/examples/onnx/vgg19.py new file mode 100644 index 0000000000..49606cb233 --- /dev/null +++ b/examples/onnx/vgg19.py @@ -0,0 +1,116 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under th + +import os +import numpy as np +from PIL import Image + +from singa import device +from singa import tensor +from singa import autograd +from singa import sonnx +import onnx +from utils import download_model, update_batch_size, check_exist_or_download + +import logging +logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') + + +def preprocess(img): + img = img.resize((256, 256)) + img = img.crop((16, 16, 240, 240)) + img = np.array(img).astype(np.float32) / 255. 
+ img = np.rollaxis(img, 2, 0) + for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]): + img[channel, :, :] -= mean + img[channel, :, :] /= std + img = np.expand_dims(img, axis=0) + return img + + +def get_image_label(): + # download label + label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' + with open(check_exist_or_download(label_url), 'r') as f: + labels = [l.rstrip() for l in f] + + # download image + image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' + img = Image.open(check_exist_or_download(image_url)) + return img, labels + + +class Infer: + + def __init__(self, sg_ir): + self.sg_ir = sg_ir + for idx, tens in sg_ir.tensor_map.items(): + # allow the tensors to be updated + tens.requires_grad = True + tens.stores_grad = True + sg_ir.tensor_map[idx] = tens + + def forward(self, x): + return sg_ir.run([x])[0] + + +if __name__ == "__main__": + url = 'https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg19-7.tar.gz' + download_dir = '/tmp/' + model_path = os.path.join(download_dir, 'vgg19', 'vgg19.onnx') + + logging.info("onnx load model...") + download_model(url) + onnx_model = onnx.load(model_path) + + # set batch size + onnx_model = update_batch_size(onnx_model, 1) + + # prepare the model + logging.info("prepare model...") + # dev = device.get_default_device() + dev = device.create_cuda_gpu() + sg_ir = sonnx.prepare(onnx_model, device=dev) + autograd.training = False + model = Infer(sg_ir) + + # verify the test + # from utils import load_dataset + # inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'vgg19', 'test_data_set_0')) + # x_batch = tensor.Tensor(device=dev, data=inputs[0]) + # outputs = model.forward(x_batch) + # for ref_o, o in zip(ref_outputs, outputs): + # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) + + # inference + logging.info("preprocessing...") + img, labels = get_image_label() + img = preprocess(img) + + logging.info("model running...") + x_batch = tensor.Tensor(device=dev, data=img) + y = model.forward(x_batch) + + logging.info("postprocessing...") + y = tensor.softmax(y) + scores = tensor.to_numpy(y) + scores = np.squeeze(scores) + a = np.argsort(scores)[::-1] + for i in a[0:5]: + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) From f334f6bddc41f36ec99702965a17e655f9855b77 Mon Sep 17 00:00:00 2001 From: Shashankwer Date: Wed, 3 Jun 2020 04:39:23 +0800 Subject: [PATCH 08/24] Shufflenet v1 Added ONNX example for shufflenetv1 --- examples/onnx/shufflenetv1.py | 109 ++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 examples/onnx/shufflenetv1.py diff --git a/examples/onnx/shufflenetv1.py b/examples/onnx/shufflenetv1.py new file mode 100644 index 0000000000..35fa777cc6 --- /dev/null +++ b/examples/onnx/shufflenetv1.py @@ -0,0 +1,109 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under th + +import os +import logging +import numpy as np +from PIL import Image + +from singa import device +from singa import tensor +from singa import autograd +from singa import sonnx +import onnx +from utils import download_model +from utils import update_batch_size +from utils import check_exist_or_download + +logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') + + +def preprocess(img): + img = img.resize((256, 256)) + img = img.crop((16, 16, 240, 240)) + img = np.array(img).astype(np.float32) / 255. + img = np.rollaxis(img, 2, 0) + for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]): + img[channel, :, :] -= mean + img[channel, :, :] /= std + img = np.expand_dims(img, axis=0) + return img + + +def get_image_label(): + # download label + label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' + with open(check_exist_or_download(label_url), 'r') as f: + labels = [l.rstrip() for l in f] + image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' + img = Image.open(check_exist_or_download(image_url)) + return img, labels + + +class Infer: + + def __init__(self, sg_ir): + self.sg_ir = sg_ir + for idx, tens in sg_ir.tensor_map.items(): + tens.require_grad = True + tens.store_grad = True + sg_ir.tensor_map[idx] = tens + + def forward(self, x): + return sg_ir.run([x])[0] + + +if __name__ == '__main__': + url = 'https://github.com/onnx/models/raw/master/vision/classification/shufflenet/model/shufflenet-9.tar.gz' + download_dir = "/tmp/" + model_path = os.path.join(download_dir, 'shufflenet', 'model.onnx') + logging.info("onnx load model....") + download_model(url) + onnx_model = onnx.load(model_path) + # setting batch size + onnx_model = update_batch_size(onnx_model, 1) + # preparing the model + logging.info("preparing model...") + dev = device.create_cuda_gpu() + sg_ir = sonnx.prepare(onnx_model, device=dev) + autograd.training = False + model = Infer(sg_ir) + + # verifying the test dataset + #from utils import load_dataset + #inputs,ref_outputs = load_dataset(os.path.join('/tmp','shufflenet','test_data_set_0')) + #x_batch = tensor.Tensor(device = dev,data=inputs[0]) + #outputs = model.forward(x_batch) + # for ref_o,o in zip(ref_outputs,outputs): + # np.testing.assert_almost_equal(ref_o,tensor.to_numpy(o),4) + + # inference + logging.info("preprocessing...") + img, labels = get_image_label() + img = preprocess(img) + x_batch = tensor.Tensor(device=dev, data=img) + logging.info("model running....") + y = model.forward(x_batch) + logging.info("postprocessing....") + y = tensor.softmax(y) + scores = tensor.to_numpy(y) + scores = np.squeeze(scores) + a = np.argsort(scores)[::-1] + for i in a[0:5]: + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) From 3ce29669940c35591b732a59c775b2698b2016e2 Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Fri, 5 Jun 2020 15:56:52 +0800 Subject: [PATCH 09/24] Implement densenet121 from ONNX --- examples/onnx/densenet121.py | 116 +++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 examples/onnx/densenet121.py diff --git 
a/examples/onnx/densenet121.py b/examples/onnx/densenet121.py new file mode 100644 index 0000000000..f809ac4b83 --- /dev/null +++ b/examples/onnx/densenet121.py @@ -0,0 +1,116 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under th + +import os +import numpy as np +from PIL import Image + +from singa import device +from singa import tensor +from singa import autograd +from singa import sonnx +import onnx +from utils import download_model, update_batch_size, check_exist_or_download + +import logging +logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') + + +def preprocess(img): + img = img.resize((256, 256)) + img = img.crop((16, 16, 240, 240)) + img = np.array(img).astype(np.float32) / 255. + img = np.rollaxis(img, 2, 0) + for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]): + img[channel, :, :] -= mean + img[channel, :, :] /= std + img = np.expand_dims(img, axis=0) + return img + + +def get_image_labe(): + # download label + label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt' + with open(check_exist_or_download(label_url), 'r') as f: + labels = [l.rstrip() for l in f] + + # download image + image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg' + img = Image.open(check_exist_or_download(image_url)) + return img, labels + + +class Infer: + + def __init__(self, sg_ir): + self.sg_ir = sg_ir + for idx, tens in sg_ir.tensor_map.items(): + # allow the tensors to be updated + tens.requires_grad = True + tens.stores_grad = True + sg_ir.tensor_map[idx] = tens + + def forward(self, x): + return sg_ir.run([x])[0] + + +if __name__ == "__main__": + url = 'https://s3.amazonaws.com/download.onnx/models/opset_9/densenet121.tar.gz' + download_dir = '/tmp/' + model_path = os.path.join(download_dir, 'densenet121', 'model.onnx') + + logging.info("onnx load model...") + download_model(url) + onnx_model = onnx.load(model_path) + + # set batch size + onnx_model = update_batch_size(onnx_model, 1) + + # prepare the model + logging.info("prepare model...") + dev = device.get_default_device() + sg_ir = sonnx.prepare(onnx_model, device=dev) + autograd.training = False + model = Infer(sg_ir) + + # verifty the test + # from utils import load_dataset + # inputs, ref_outputs = load_dataset( + # os.path.join('/tmp', 'densenet121', 'test_data_set_0')) + # x_batch = tensor.Tensor(device=dev, data=inputs[0]) + # outputs = model.forward(x_batch) + # for ref_o, o in zip(ref_outputs, outputs): + # np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4) + + # inference + logging.info("preprocessing...") + img, labels = get_image_labe() + img = preprocess(img) + + logging.info("model running...") + x_batch = tensor.Tensor(device=dev, data=img) + y = model.forward(x_batch) + + logging.info("postprocessing...") + y = 
tensor.softmax(y) + scores = tensor.to_numpy(y) + scores = np.squeeze(scores) + a = np.argsort(scores)[::-1] + for i in a[0:5]: + logging.info('class=%s ; probability=%f' % (labels[i], scores[i])) From ace84fe3bcf3e4fba5abbd8d73b62cd688f74223 Mon Sep 17 00:00:00 2001 From: agnesnatasya Date: Fri, 5 Jun 2020 15:58:40 +0800 Subject: [PATCH 10/24] Change device to GPU --- examples/onnx/densenet121.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/onnx/densenet121.py b/examples/onnx/densenet121.py index f809ac4b83..14cfa5f04a 100644 --- a/examples/onnx/densenet121.py +++ b/examples/onnx/densenet121.py @@ -84,7 +84,7 @@ def forward(self, x): # prepare the model logging.info("prepare model...") - dev = device.get_default_device() + dev = device.create_cuda_gpu() sg_ir = sonnx.prepare(onnx_model, device=dev) autograd.training = False model = Infer(sg_ir) From 2b9c1f360ace9ec8edd7e33a4fdd42f59913d96b Mon Sep 17 00:00:00 2001 From: Sathya Narrayanan Date: Fri, 19 Jun 2020 20:07:23 +0800 Subject: [PATCH 11/24] Updated activation.cc Fix spelling Check --- src/model/layer/activation.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/model/layer/activation.cc b/src/model/layer/activation.cc index eb90d87eb3..8e09f59743 100644 --- a/src/model/layer/activation.cc +++ b/src/model/layer/activation.cc @@ -62,7 +62,7 @@ const Tensor Activation::Forward(int flag, const Tensor& input) { output = ReLU(input); if (flag & kTrain) buf_.push(input); } else - LOG(FATAL) << "Unkown activation: " << mode_; + LOG(FATAL) << "Unknown activation: " << mode_; return output; } From 2ac89cc2baf4bcbdeadc234c51c454cb8f93ba38 Mon Sep 17 00:00:00 2001 From: wangwei Date: Wed, 23 Sep 2020 10:55:35 +0800 Subject: [PATCH 12/24] fix the license issue --- LICENSE | 32 +++++++++++++++++++ README.md | 6 ++-- java/pom.xml | 2 ++ setup.py | 2 +- .../centos6/cuda10/Dockerfile.manylinux2014 | 2 +- 5 files changed, 39 insertions(+), 5 deletions(-) diff --git a/LICENSE b/LICENSE index 38a240c2f6..a1c8e433a3 100644 --- a/LICENSE +++ b/LICENSE @@ -511,6 +511,7 @@ SINGA bundles the following under Apache License Version 2.0: examples/onnx/tokenization.py examples/onnx/run_onnx_squad.py examples/onnx/inputs.json +examples/onnx/gpt2/requirements.txt # Copyright 2018 The Google AI Language Team Authors. # @@ -525,3 +526,34 @@ examples/onnx/inputs.json # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + + +=============================================================================== +SINGA bundles the following under new BSD 3-clause license: +tool/docker/devel/centos6/cuda10/cuda.repo + +Copyright (c) 2019,2020 NVIDIA CORPORATION. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of NVIDIA CORPORATION nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md index 849b2e6b66..08d4149cdc 100644 --- a/README.md +++ b/README.md @@ -21,9 +21,9 @@ # Apache SINGA -[![Native Ubuntu build status](https://github.com/apache/singa/workflows/Native-Ubuntu/badge.svg) -[![Native Mac build status](https://github.com/apache/singa/workflows/Native-MacOS/badge.svg) -[![conda build status](https://github.com/apache/singa/workflows/conda/badge.svg) +![Native Ubuntu build status](https://github.com/apache/singa/workflows/Native-Ubuntu/badge.svg) +![Native Mac build status](https://github.com/apache/singa/workflows/Native-MacOS/badge.svg) +![conda build status](https://github.com/apache/singa/workflows/conda/badge.svg) [![Documentation Status](https://readthedocs.org/projects/apache-singa/badge/?version=latest)](https://apache-singa.readthedocs.io/en/latest/?badge=latest) ![License](http://img.shields.io/:license-Apache%202.0-blue.svg) [![Follow Apache SINGA on Twitter](https://img.shields.io/twitter/follow/apachesinga.svg?style=social&label=Follow)](https://twitter.com/ApacheSinga) diff --git a/java/pom.xml b/java/pom.xml index b16dbe39b8..5ff0d250b7 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -67,6 +67,7 @@ test/gtest/** examples/onnx/bert/inputs.json examples/onnx/test_onnx_backend.py + examples/onnx/gpt2/requirements.txt include/singa/utils/tinydir.h include/singa/utils/cuda_utils.h src/core/tensor/distribution.cl @@ -79,6 +80,7 @@ cmake/Thirdparty/GetGitRevisionDescription.cmake src/api/numpy.i tool/cpplint.py + tool/docker/devel/centos6/cuda10/cuda.repo travis*.log .gitmodules java/target/* diff --git a/setup.py b/setup.py index 1abc48c0db..f7a3f19281 100644 --- a/setup.py +++ b/setup.py @@ -83,7 +83,7 @@ from datetime import date # stable version -VERSION = '3.0.0' +VERSION = '3.1.0.rc1' # get the git hash # git_hash = subprocess.check_output(["git", "describe"]).strip().split('-')[-1][1:] # comment the next line to build wheel for stable version diff --git a/tool/docker/devel/centos6/cuda10/Dockerfile.manylinux2014 b/tool/docker/devel/centos6/cuda10/Dockerfile.manylinux2014 index 1adb1b1853..d3aeaff4bb 100644 --- a/tool/docker/devel/centos6/cuda10/Dockerfile.manylinux2014 +++ b/tool/docker/devel/centos6/cuda10/Dockerfile.manylinux2014 @@ -15,7 +15,7 @@ # limitations under the License. -# The latest tag uses gcc 9, which is too high nvcc. +# The latest tag uses gcc 9, which is too high for nvcc. # The following tag uses gcc 8, which works with nvcc. 
FROM quay.io/pypa/manylinux2014_x86_64:2020-05-01-b37d76b From 47017c1261c1e9f52600af55ee05c90ca6d4a74e Mon Sep 17 00:00:00 2001 From: wang wei Date: Thu, 24 Sep 2020 10:54:12 +0800 Subject: [PATCH 13/24] fix the link issue in readme file --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 849b2e6b66..08d4149cdc 100644 --- a/README.md +++ b/README.md @@ -21,9 +21,9 @@ # Apache SINGA -[![Native Ubuntu build status](https://github.com/apache/singa/workflows/Native-Ubuntu/badge.svg) -[![Native Mac build status](https://github.com/apache/singa/workflows/Native-MacOS/badge.svg) -[![conda build status](https://github.com/apache/singa/workflows/conda/badge.svg) +![Native Ubuntu build status](https://github.com/apache/singa/workflows/Native-Ubuntu/badge.svg) +![Native Mac build status](https://github.com/apache/singa/workflows/Native-MacOS/badge.svg) +![conda build status](https://github.com/apache/singa/workflows/conda/badge.svg) [![Documentation Status](https://readthedocs.org/projects/apache-singa/badge/?version=latest)](https://apache-singa.readthedocs.io/en/latest/?badge=latest) ![License](http://img.shields.io/:license-Apache%202.0-blue.svg) [![Follow Apache SINGA on Twitter](https://img.shields.io/twitter/follow/apachesinga.svg?style=social&label=Follow)](https://twitter.com/ApacheSinga) From 2705c65bdb62f5b3e05a519cefbb9b80d34d2c3e Mon Sep 17 00:00:00 2001 From: wangwei Date: Thu, 24 Sep 2020 10:58:15 +0800 Subject: [PATCH 14/24] change the filename to be consistent with the docs --- examples/mlp/{module.py => model.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/mlp/{module.py => model.py} (100%) diff --git a/examples/mlp/module.py b/examples/mlp/model.py similarity index 100% rename from examples/mlp/module.py rename to examples/mlp/model.py From 389bd56507d709c36ba893a8e1366807737b32ae Mon Sep 17 00:00:00 2001 From: Joddiy Zhang Date: Thu, 24 Sep 2020 11:53:05 +0800 Subject: [PATCH 15/24] Update LICENSE add ro_bert_a's requirements --- LICENSE | 1 + 1 file changed, 1 insertion(+) diff --git a/LICENSE b/LICENSE index a1c8e433a3..e968ba335a 100644 --- a/LICENSE +++ b/LICENSE @@ -512,6 +512,7 @@ examples/onnx/tokenization.py examples/onnx/run_onnx_squad.py examples/onnx/inputs.json examples/onnx/gpt2/requirements.txt +examples/onnx/ro_bert_a/requirements.txt # Copyright 2018 The Google AI Language Team Authors. 
# From e1f346df67d37d36012243f412702f0a27a123bc Mon Sep 17 00:00:00 2001 From: Joddiy Zhang Date: Thu, 24 Sep 2020 11:55:03 +0800 Subject: [PATCH 16/24] Update pom.xml add ro_bert_a requirements --- java/pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/java/pom.xml b/java/pom.xml index 5ff0d250b7..1f68adbc66 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -68,6 +68,7 @@ examples/onnx/bert/inputs.json examples/onnx/test_onnx_backend.py examples/onnx/gpt2/requirements.txt + examples/onnx/ro_bert_a/requirements.txt include/singa/utils/tinydir.h include/singa/utils/cuda_utils.h src/core/tensor/distribution.cl From 0edff7babe3907f58783de3d5e00eef1ba287c00 Mon Sep 17 00:00:00 2001 From: joddiy Date: Thu, 24 Sep 2020 11:28:31 +0800 Subject: [PATCH 17/24] fix some issues and requirements --- examples/onnx/gpt2/gpt2.py | 2 +- examples/onnx/ro_bert_a/requirements.txt | 1 + examples/onnx/{ => ro_bert_a}/ro_bert_a.py | 12 ++---------- 3 files changed, 4 insertions(+), 11 deletions(-) create mode 100644 examples/onnx/ro_bert_a/requirements.txt rename examples/onnx/{ => ro_bert_a}/ro_bert_a.py (96%) diff --git a/examples/onnx/gpt2/gpt2.py b/examples/onnx/gpt2/gpt2.py index 56b7cfed6a..dd27334358 100644 --- a/examples/onnx/gpt2/gpt2.py +++ b/examples/onnx/gpt2/gpt2.py @@ -106,5 +106,5 @@ def train_one_batch(self, x, y): y = tensor.Tensor(device=dev, data=y) x = tensor.concatenate([x, y], 2) - text = tokenizer.decode(output) + text = postprocess(output) print(text) \ No newline at end of file diff --git a/examples/onnx/ro_bert_a/requirements.txt b/examples/onnx/ro_bert_a/requirements.txt new file mode 100644 index 0000000000..14693ad72e --- /dev/null +++ b/examples/onnx/ro_bert_a/requirements.txt @@ -0,0 +1 @@ +transformers==2.5.1 \ No newline at end of file diff --git a/examples/onnx/ro_bert_a.py b/examples/onnx/ro_bert_a/ro_bert_a.py similarity index 96% rename from examples/onnx/ro_bert_a.py rename to examples/onnx/ro_bert_a/ro_bert_a.py index 5b6ac0af3d..b6b02ed9ee 100644 --- a/examples/onnx/ro_bert_a.py +++ b/examples/onnx/ro_bert_a/ro_bert_a.py @@ -17,10 +17,6 @@ # under the License. # - -import sys -sys.path.append('/singa/build/python/') - import os import numpy as np @@ -30,6 +26,8 @@ from singa import autograd import onnx +import sys +sys.path.append(os.path.dirname(__file__) + '/..') from utils import download_model, check_exist_or_download import logging @@ -45,12 +43,6 @@ def preprocess(): tokens = np.array(tokens) return tokens.reshape([1, -1]).astype(np.float32) - -def postprocess(out): - text = tokenizer.decode(out) - return text - - class MyModel(sonnx.SONNXModel): def __init__(self, onnx_model): From 7b57a2ed4db1b245aad3a6877941cea72885fd88 Mon Sep 17 00:00:00 2001 From: wang wei Date: Sun, 27 Sep 2020 15:08:26 +0800 Subject: [PATCH 18/24] Update the release note for v3.1.0.rc1 --- RELEASE_NOTES | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 8491a6b9b0..f7852ee918 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,3 +1,44 @@ +Release Notes - SINGA - Version singa-3.1.0 + +SINGA is a distributed deep learning library. + +This release includes following changes: + + * Tensor core: + * Support tensor transformation (reshape, transpose) for tensors up to 6 dimensions. + * Implement traverse_unary_transform in Cuda backend, which is similar to CPP backend one. 
+ + * Add new tensor operators into the autograd module, including + CosSim, DepthToSpace, Embedding, Erf, Expand, Floor, Pad, Round, Rounde, SpaceToDepth, UpSample, Where. + The corresponding ONNX operators are thus supported by SINGA. + + * Add Embedding and Gemm into the layer module. + + * Add SGD operators to opt module, including RMSProp, Adam, and AdaGrad. + + * Extend the sonnx module to support + DenseNet121, ShuffleNetv1, ShuffleNetv2, SqueezeNet, VGG19, GPT2, and RoBERTa, + + * Reconstruct sonnx to + * Support creating operators from both layer and autograd. + * Re-write SingaRep to provide a more powerful intermediate representation of SINGA. + * Add a SONNXModel which implements from Model to provide uniform API and features. + + * Add one example that trains a BiLSTM model over the InsuranceQA data. + + * Replace the Travis CI with Github workflow. Add quality and coverage management. + + * Add compiling and packaging scripts to creat wheel packages for distribution. + + * Fix bugs + * Fix IMDB LSTM model example training script. + * Fix Tensor operation Mult on Broadcasting use cases. + * Gaussian function on Tensor now can run on Tensor with odd size. + * Updated a testing helper function gradients() in autograd to lookup param gradient by param python object id for testing purpose. + + +---------------------------------------------------------------------------------------------- + Release Notes - SINGA - Version singa-3.0.0 SINGA is a distributed deep learning library. From af7b6bec81f4a3c252cd40ef56ab5167df97ca82 Mon Sep 17 00:00:00 2001 From: joddiy Date: Tue, 29 Sep 2020 17:39:26 +0800 Subject: [PATCH 19/24] fix_expand_operator --- python/singa/autograd.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/python/singa/autograd.py b/python/singa/autograd.py index ddfbf03967..76a645ea8e 100644 --- a/python/singa/autograd.py +++ b/python/singa/autograd.py @@ -5051,15 +5051,9 @@ def forward(self, x): self.shape = self.shape.tolist() else: self.shape = list(self.shape) - self.dim_changed = True self.x_shape = list(x.shape()) x_shape = self.x_shape.copy() - for s_1, s_2 in zip(self.shape[::-1], x_shape[::-1]): - if s_1 != 1 and s_2 != 1 and s_1 != s_2: - if len(self.shape) != len(x_shape): - assert False, ('not support dim_unchanged mode') - self.dim_changed = False - break + self.dim_changed = True if len(self.shape) != len(x_shape) else False if self.dim_changed: tmp_tensor = singa.Tensor(self.shape, x.device()) tmp_tensor.SetFloatValue(1.) 
From c8c80635d05a381abbb15569f5ac4838bbb24e23 Mon Sep 17 00:00:00 2001 From: Json Lee Date: Fri, 9 Oct 2020 10:35:03 +0800 Subject: [PATCH 20/24] fix README missing install link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 08d4149cdc..685688145c 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ Distributed deep learning system ## Quick Start -* [Installation](doc/en/docs/installation.md) +* [Installation](http://singa.apache.org/docs/installation/) * [Examples](examples) ## Issues From 43e912f185565a94822a823383ca301c5dd56ff8 Mon Sep 17 00:00:00 2001 From: joddiy Date: Tue, 13 Oct 2020 22:01:47 +0800 Subject: [PATCH 21/24] fix onnx training examples --- examples/onnx/training/model.json | 60 ++++++++++++++++++++++--------- examples/onnx/training/train.py | 12 ++++--- 2 files changed, 51 insertions(+), 21 deletions(-) diff --git a/examples/onnx/training/model.json b/examples/onnx/training/model.json index 1fe52b160d..f72d1a7413 100644 --- a/examples/onnx/training/model.json +++ b/examples/onnx/training/model.json @@ -3,82 +3,110 @@ "name": "ResNet-18 Version 1", "description": "ResNet v1 uses post-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v1/resnet18v1.tar.gz", - "path": "resnet18v1/resnet18v1.onnx" + "path": "resnet18v1/resnet18v1.onnx", + "last_layers": -3, + "last_layers_dim": 512 }, "resnet34v1": { "name": "ResNet-34 Version 1", "description": "ResNet v1 uses post-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet34v1/resnet34v1.tar.gz", - "path": "resnet34v1/resnet34v1.onnx" + "path": "resnet34v1/resnet34v1.onnx", + "last_layers": -3, + "last_layers_dim": 512 }, "resnet50v1": { "name": "ResNet-50 Version 1", "description": "ResNet v1 uses post-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v1/resnet50v1.tar.gz", - "path": "resnet50v1/resnet50v1.onnx" + "path": "resnet50v1/resnet50v1.onnx", + "last_layers": -3, + "last_layers_dim": 2048 }, "resnet101v1": { "name": "ResNet-101 Version 1", "description": "ResNet v1 uses post-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet101v1/resnet101v1.tar.gz", - "path": "resnet101v1/resnet101v1.onnx" + "path": "resnet101v1/resnet101v1.onnx", + "last_layers": -3, + "last_layers_dim": 2048 }, "resnet152v1": { "name": "ResNet-152 Version 1", "description": "ResNet v1 uses post-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet152v1/resnet152v1.tar.gz", - "path": "resnet152v1/resnet152v1.onnx" + "path": "resnet152v1/resnet152v1.onnx", + "last_layers": -3, + "last_layers_dim": 2048 }, "resnet18v2": { "name": "ResNet-18 Version 2", "description": "ResNet v2 uses pre-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v2/resnet18v2.tar.gz", - "path": "resnet18v2/resnet18v2.onnx" + "path": "resnet18v2/resnet18v2.onnx", + "last_layers": -3, + "last_layers_dim": 512 }, "resnet34v2": { "name": "ResNet-34 Version 2", "description": "ResNet v2 uses pre-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet34v2/resnet34v2.tar.gz", - "path": "resnet34v2/resnet34v2.onnx" + "path": "resnet34v2/resnet34v2.onnx", + "last_layers": -3, + "last_layers_dim": 512 }, "resnet50v2": { "name": "ResNet-50 Version 2", "description": "ResNet v2 uses pre-activation for 
the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.tar.gz", - "path": "resnet50v2/resnet50v2.onnx" + "path": "resnet50v2/resnet50v2.onnx", + "last_layers": -3, + "last_layers_dim": 2048 }, "resnet101v2": { "name": "ResNet-101 Version 2", "description": "ResNet v2 uses pre-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet101v2/resnet101v2.tar.gz", - "path": "resnet101v2/resnet101v2.onnx" + "path": "resnet101v2/resnet101v2.onnx", + "last_layers": -3, + "last_layers_dim": 2048 }, "resnet152v2": { "name": "ResNet-152 Version 2", "description": "ResNet v2 uses pre-activation for the residual blocks", "url": "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet152v2/resnet152v2.tar.gz", - "path": "resnet152v2/resnet152v2.onnx" + "path": "resnet152v2/resnet152v2.onnx", + "last_layers": -3, + "last_layers_dim": 2048 }, "vgg16": { "name": "VGG-16", "url": "https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg16-7.tar.gz", - "path": "vgg16/vgg16.onnx" + "path": "vgg16/vgg16.onnx", + "last_layers": -3, + "last_layers_dim": 4096 }, "vgg16bn": { "name": "VGG-16 with batch normalization", "description": "VGG have batch normalization applied after each convolutional layer", "url": "https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg16-bn-7.tar.gz", - "path": "vgg16-bn/vgg16-bn.onnx" + "path": "vgg16-bn/vgg16-bn.onnx", + "last_layers": -3, + "last_layers_dim": 4096 }, "vgg19": { "name": "VGG-19", - "url": "https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg16-9.tar.gz", - "path": "vgg19/vgg19.onnx" + "url": "https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg19-7.tar.gz", + "path": "vgg19/vgg19.onnx", + "last_layers": -3, + "last_layers_dim": 4096 }, "vgg19bn": { "name": "VGG-19 with batch normalization", "description": "VGG have batch normalization applied after each convolutional layer", - "url": "https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg16-bn-9.tar.gz", - "path": "vgg19-bn/vgg19-bn.onnx" + "url": "https://github.com/onnx/models/raw/master/vision/classification/vgg/model/vgg19-bn-7.tar.gz", + "path": "vgg19-bn/vgg19-bn.onnx", + "last_layers": -3, + "last_layers_dim": 4096 } } \ No newline at end of file diff --git a/examples/onnx/training/train.py b/examples/onnx/training/train.py index fca072ece9..8407bfe7d4 100644 --- a/examples/onnx/training/train.py +++ b/examples/onnx/training/train.py @@ -104,18 +104,18 @@ def resize_dataset(x, image_size): class MyModel(sonnx.SONNXModel): - def __init__(self, onnx_model, num_classes=10, num_channels=3): + def __init__(self, onnx_model, num_classes=10, num_channels=3, last_layers=-1, in_dim=1000): super(MyModel, self).__init__(onnx_model) self.num_classes = num_classes self.input_size = 224 self.dimension = 4 self.num_channels = num_channels self.num_classes = num_classes - self.linear = layer.Linear(512, num_classes) + self.last_layers = last_layers + self.linear = layer.Linear(in_dim, num_classes) def forward(self, *x): - # if you change to other models, please update the output name here - y = super(MyModel, self).forward(*x, aux_output=['flatten_170'])[1] + y = super(MyModel, self).forward(*x, last_layers=self.last_layers)[0] y = self.linear(y) return y @@ -175,7 +175,9 @@ def run(global_rank, onnx_model = onnx.load(os.path.join('/tmp', model_config['path'])) model = MyModel(onnx_model, num_channels=num_channels, - 
num_classes=num_classes) + num_classes=num_classes, + last_layers=model_config['last_layers'], + in_dim=model_config['last_layers_dim']) # For distributed training, sequential gives better performance if hasattr(sgd, "communicator"): From a3e88c95cda8e4aca3a8ad6604ef1c09e69676c0 Mon Sep 17 00:00:00 2001 From: Json Lee Date: Wed, 14 Oct 2020 19:50:46 +0800 Subject: [PATCH 22/24] fix mlp module import error --- examples/cnn/train_cnn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/cnn/train_cnn.py b/examples/cnn/train_cnn.py index 4c74b99801..e4fd9629a6 100644 --- a/examples/cnn/train_cnn.py +++ b/examples/cnn/train_cnn.py @@ -144,8 +144,8 @@ def run(global_rank, os.path.abspath(inspect.getfile(inspect.currentframe()))) parent = os.path.dirname(current) sys.path.insert(0, parent) - from mlp import module - model = module.create_model(data_size=data_size, + from mlp import model + model = model.create_model(data_size=data_size, num_classes=num_classes) # For distributed training, sequential gives better performance From e486eeae6bba66a79abe68788c5201d9e37a6cc3 Mon Sep 17 00:00:00 2001 From: wang wei Date: Mon, 19 Oct 2020 09:09:55 +0800 Subject: [PATCH 23/24] change default python version to 3.6 to be compatible with colab --- tool/conda/singa/conda_build_config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tool/conda/singa/conda_build_config.yaml b/tool/conda/singa/conda_build_config.yaml index 9ac45c0f49..ddef9d0071 100644 --- a/tool/conda/singa/conda_build_config.yaml +++ b/tool/conda/singa/conda_build_config.yaml @@ -31,8 +31,8 @@ cudnn: # [linux] dnnl: - 1.1 python: -# - 3.6 - - 3.7 + - 3.6 +# - 3.7 nccl: - 2.6.4.1 # [environ.get("CUDA")=="10.2"] - 2.4.8.1 # [environ.get("CUDA")=="10.0"] From ea8d671434717e16f98ab94917ede46338f2bdc7 Mon Sep 17 00:00:00 2001 From: wang wei Date: Mon, 19 Oct 2020 10:14:39 +0800 Subject: [PATCH 24/24] update the version setting in cmakefile --- CMakeLists.txt | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ba3102cf22..6a151f7b8f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,17 +22,22 @@ PROJECT(singa) LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Thirdparty) -include(GetGitRevisionDescription) -git_describe(VERSION --tags --dirty=-d) -string(REGEX REPLACE "^([0-9]+)\\..*" "\\1" VERSION_MAJOR "${VERSION}") -string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1" VERSION_MINOR "${VERSION}") -string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" VERSION_PATCH "${VERSION}") +#include(GetGitRevisionDescription) +#git_describe(VERSION --tags --dirty=-d) +#string(REGEX REPLACE "^([0-9]+)\\..*" "\\1" VERSION_MAJOR "${VERSION}") +#string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1" VERSION_MINOR "${VERSION}") +#string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" VERSION_PATCH "${VERSION}") + + +SET(PACKAGE_VERSION 3.1.0) # ${VERSION}) +SET(VERSION 3.1.0) +SET(SINGA_MAJOR_VERSION 3) +SET(SINGA_MINOR_VERSION 1) +SET(SINGA_PATCH_VERSION 0) +#SET(SINGA_MAJOR_VERSION ${VERSION_MAJOR}) # 0 - +#SET(SINGA_MINOR_VERSION ${VERSION_MINOR}) # 0 - 9 +#SET(SINGA_PATCH_VERSION ${VERSION_PATCH}) # 0 - 99 - -SET(PACKAGE_VERSION ${VERSION}) -SET(SINGA_MAJOR_VERSION ${VERSION_MAJOR}) # 0 - -SET(SINGA_MINOR_VERSION ${VERSION_MINOR}) # 0 - 9 -SET(SINGA_PATCH_VERSION ${VERSION_PATCH}) # 0 - 99 MATH(EXPR SINGA_VERSION "${SINGA_MAJOR_VERSION} * 1000 + ${SINGA_MINOR_VERSION} * 100 + ${SINGA_PATCH_VERSION}") 
message(STATUS "SINGA git Version ${VERSION}")
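For reference, the fine-tuning pattern that PATCH 21 introduces in examples/onnx/training/train.py reduces to the short sketch below. The MyModel class, the last_layers cut point, and the last_layers/last_layers_dim fields mirror that diff and examples/onnx/training/model.json; the JSON-loading lines at the bottom are an illustrative assumption about how the config is read, not code taken from the patch.

    # Minimal transfer-learning sketch based on the patched train.py (PATCH 21).
    # Assumes SINGA with the reworked sonnx module and a downloaded ONNX backbone.
    import json
    import os

    import onnx
    from singa import layer
    from singa import sonnx


    class MyModel(sonnx.SONNXModel):
        """Pretrained ONNX backbone with a new task-specific linear head."""

        def __init__(self, onnx_model, num_classes, last_layers, in_dim):
            super(MyModel, self).__init__(onnx_model)
            self.last_layers = last_layers                    # where to cut the ONNX graph
            self.linear = layer.Linear(in_dim, num_classes)   # replacement classifier head

        def forward(self, *x):
            # run the backbone only up to `last_layers`, then apply the new head
            y = super(MyModel, self).forward(*x, last_layers=self.last_layers)[0]
            return self.linear(y)


    # Illustrative usage (assumed): read the per-backbone head configuration from
    # model.json, which after PATCH 21 stores the cut point and feature dimension.
    with open('examples/onnx/training/model.json') as fd:
        cfg = json.load(fd)['resnet18v1']
    onnx_model = onnx.load(os.path.join('/tmp', cfg['path']))
    model = MyModel(onnx_model,
                    num_classes=10,
                    last_layers=cfg['last_layers'],
                    in_dim=cfg['last_layers_dim'])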