Commit

update

dcslin committed Sep 30, 2020
1 parent f20345c commit b439aac
Showing 5 changed files with 6 additions and 5 deletions.
1 change: 1 addition & 0 deletions LICENSE
@@ -297,6 +297,7 @@ license and copyright terms herein.
 =====================================================================
 SINGA bundles the following under MIT license:
 cmake/ThirdParty/FindOpenCL.cmake
+include/half.hpp
 
 Copyright (c) 2010-2016 Institute for Microelectronics,
 Institute for Analysis and Scientific Computing, TU Wien.
4 changes: 2 additions & 2 deletions examples/cnn/train_cnn.py
@@ -121,8 +121,6 @@ def run(global_rank,
         from data import mnist
         train_x, train_y, val_x, val_y = mnist.load()
 
-    train_x = train_x.astype(np_dtype[precision])
-    val_x = val_x.astype(np_dtype[precision])
 
     num_channels = train_x.shape[1]
     image_size = train_x.shape[2]
@@ -216,6 +214,7 @@ def run(global_rank,
                 x = augmentation(x, batch_size)
                 if (image_size != model.input_size):
                     x = resize_dataset(x, model.input_size)
+            x = x.astype(np_dtype[precision])
             y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
 
             # Copy the patch data into input tensors
@@ -246,6 +245,7 @@ def run(global_rank,
             if model.dimension == 4:
                 if (image_size != model.input_size):
                     x = resize_dataset(x, model.input_size)
+            x = x.astype(np_dtype[precision])
             y = val_y[b * batch_size:(b + 1) * batch_size]
             tx.copy_from_numpy(x)
             ty.copy_from_numpy(y)
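
A likely reason the cast moved: NumPy-based resizing (and most augmentation) hands back a float32 array, so a dataset cast to fp16 up front is silently promoted again before it reaches the input tensor. Casting each batch after all preprocessing keeps the array in the requested precision. A minimal runnable sketch of the pattern; the names np_dtype, precision, and resize_dataset follow the diff, while the table values and the stub body are assumptions:

import numpy as np

# Assumed dtype table; in train_cnn.py it is imported from singa as np_dtype.
np_dtype = {"float16": np.float16, "float32": np.float32}

def resize_dataset(x, size):
    # Hypothetical stand-in for the example's resize_dataset(): like most
    # NumPy-based resizing it returns float32, regardless of the input dtype.
    return np.zeros((x.shape[0], x.shape[1], size, size), dtype=np.float32)

precision = "float16"
x = np.random.rand(4, 1, 28, 28).astype(np_dtype[precision])
x = resize_dataset(x, 32)          # silently promoted back to float32
x = x.astype(np_dtype[precision])  # cast last, as the diff now does per batch
print(x.dtype)                     # float16
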
2 changes: 1 addition & 1 deletion examples/mlp/module.py
@@ -124,7 +124,7 @@ def create_model(pretrained=False, **kwargs):
     data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np_precision)
 
     dev = device.create_cuda_gpu_on(0)
-    sgd = opt.SGD(0.1, 0.9, 1e-5)
+    sgd = opt.SGD(0.1, 0.9, 1e-5, dtype=singa_dtype[args.precision])
     tx = tensor.Tensor((400, 2), dev, precision)
     ty = tensor.Tensor((400,), dev, tensor.int32)
     model = MLP(data_size=2, perceptron_size=3, num_classes=2)
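
The MLP example now also tells the optimizer which dtype to keep its states in, keyed off args.precision. A minimal sketch of the command-line wiring behind that; the flag name and choices are assumptions mirroring the np_dtype/singa_dtype keys:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--precision",
                    choices=["float16", "float32"],
                    default="float32",
                    help="dtype used for data, parameters and optimizer states")
args = parser.parse_args(["--precision", "float16"])

# With SINGA available, the optimizer is then created as in the diff:
#   sgd = opt.SGD(0.1, 0.9, 1e-5, dtype=singa_dtype[args.precision])
print(args.precision)  # float16
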
2 changes: 1 addition & 1 deletion python/singa/layer.py
@@ -856,7 +856,7 @@ def forward(self, x):
 
         self.device_check(x, self.scale, self.bias, self.running_mean,
                           self.running_var)
-        self.type_check(x, self.scale, self.bias, self.running_mean,
+        self.dtype_check(x, self.scale, self.bias, self.running_mean,
                          self.running_var)
 
         y = autograd.batchnorm_2d(
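
The batch-norm forward pass previously called type_check; the rename to dtype_check matches the helper this commit standardizes on. The helper's body is not shown in the diff, but a consistency check of roughly this shape would catch fp16 inputs meeting fp32 parameters. A sketch under that assumption:

import numpy as np

def dtype_check(*tensors):
    # Assumed behaviour of the renamed helper: reject mixed dtypes among a
    # layer's input, parameters and running statistics.
    dtypes = {t.dtype for t in tensors}
    if len(dtypes) > 1:
        raise TypeError("expected a single dtype, got %s" % sorted(map(str, dtypes)))

x = np.zeros((2, 3), dtype=np.float16)
scale = np.ones(3, dtype=np.float16)
dtype_check(x, scale)                       # passes
# dtype_check(x, scale.astype(np.float32))  # would raise TypeError
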
2 changes: 1 addition & 1 deletion src/model/operation/convolution.cc
@@ -495,7 +495,7 @@ CudnnConvHandle::CudnnConvHandle(
                               channels / groups, kernel_h, kernel_w));
 
   if (prefer == "tensor_ops") {
-    std::cout<<"using tensor op\n";
+    // std::cout<<"using tensor op\n";
     CUDNN_CHECK(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
     fp_alg = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
     bp_filter_alg = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
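
Only the debug print changes here: the convolution descriptor is still switched to CUDNN_TENSOR_OP_MATH, the cuDNN setting that permits half-precision convolutions to run on Tensor Cores, so silencing the message does not alter the tensor-op path.
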
