diff --git a/CMU_Mocap_Markers.pp b/CMU_Mocap_Markers.pp
deleted file mode 100644
index 9632316..0000000
--- a/CMU_Mocap_Markers.pp
+++ /dev/null
@@ -1,50 +0,0 @@
diff --git a/README.md b/README.md
index 4c747fb..97edde3 100644
--- a/README.md
+++ b/README.md
@@ -5,19 +5,12 @@ Numpy and Tensorflow implementation of SMPL model. For any questions, feel free
 
 I wrote this because the author-provided implementation was mainly based on [chumpy](https://github.com/mattloper/chumpy) in Python 2, which is kind of unpopular. Meanwhile, the official one cannot run on GPU.
 
-This numpy version is faster(since some computation is re-wrote in a vectorized manner) and easier to understand(hope so), and the tensorflow version can run on GPU.
+This numpy version is faster (since some of the computation is rewritten in a vectorized manner) and easier to understand (hopefully), and the tensorflow version can run on GPU.
 
 For more details about SMPL model, see [SMPL](http://smpl.is.tue.mpg.de/).
 
-Also, I provide a file `CMU_Mocap_Markers.pp`, which gives the correspondence between SMPL model and [CMU Mocap Dataset](http://mocap.cs.cmu.edu/) markers in .c3d files. For more details see the Usage section.
-
 ## Usage
 
 1. Download the model file [here](http://smpl.is.tue.mpg.de/downloads).
-2. Run `python preprocess.py /PATH/TO/THE/DOWNLOADED/MODEL` to preprocess the official model. `preprocess.py` will create a new file `model.pkl`. `smpl_np.py` and `smpl_tf.py` both rely on `model.pkl`. **NOTE: the official pickle model contains `chumpy` object, so `prerocess.py` requires `chumpy` to extract official model.** Actually you need to modify chumpy's cource code a little bit to make it compatible to `preprocess.py`.
-3. Run `python smpl_np.py` to see the example.
-4. About `CMU_Mocap_Markers.pp`: you can first generate a standard SMPL model mesh(zero pose and zero beta), open it in MeshLab, and load this file in MeshLab. It gives 42 markers' position on the model surface. I simply mark these things by hand so there might be some small errors.
-
-## One More Thing
-
-If this repo is used in any publication or project, it would be nice to let me know. I will be very happy and encouraged =)
+2. Run `python preprocess.py /PATH/TO/THE/DOWNLOADED/MODEL` to preprocess the official model. `preprocess.py` will create a new file `model.pkl`. `smpl_np.py` and `smpl_tf.py` both rely on `model.pkl`. **NOTE**: the official pickle model contains `chumpy` objects, so `preprocess.py` requires `chumpy` to extract the official model. You need to modify chumpy's source code a little bit to make it compatible with `preprocess.py` (and Python 3).
+3. Run `python smpl_np.py` or `python smpl_tf.py` to see the example.
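Taken together, the updated usage steps boil down to the workflow below. This is a minimal sketch for orientation only; it assumes `model.pkl` has already been generated by `preprocess.py` and sits in the working directory, and it only calls the public pieces of `smpl_np.py` shown further down in this diff.

```python
import numpy as np

from smpl_np import SMPLModel

# Load the parameters extracted by preprocess.py.
smpl = SMPLModel('./model.pkl')

# Zero pose and zero shape coefficients give the template (rest-pose) mesh.
verts = smpl.set_params(pose=np.zeros(smpl.pose_shape),
                        beta=np.zeros(smpl.beta_shape),
                        trans=np.zeros(smpl.trans_shape))
print(verts.shape)       # (6890, 3) vertex positions for the released SMPL meshes
print(smpl.faces.shape)  # triangle indices into verts

# Export for visual inspection in MeshLab or Blender.
smpl.save_to_obj('./t_pose.obj')
```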
diff --git a/smpl_np.py b/smpl_np.py
index 429776a..c6ba3d1 100644
--- a/smpl_np.py
+++ b/smpl_np.py
@@ -3,122 +3,211 @@
 class SMPLModel():
-    def __init__(self, model_path):
-        with open(model_path, 'rb') as f:
-            params = pickle.load(f)
-
-        self.J_regressor = params['J_regressor']
-        self.weights = params['weights']
-        self.posedirs = params['posedirs']
-        self.v_template = params['v_template']
-        self.shapedirs = params['shapedirs']
-        self.faces = params['f']
-        self.kintree_table = params['kintree_table']
-
-        id_to_col = {self.kintree_table[1, i]: i for i in range(self.kintree_table.shape[1])}
-        self.parent = {
-            i: id_to_col[self.kintree_table[0, i]]
-            for i in range(1, self.kintree_table.shape[1])
-        }
-
-        self.pose_shape = [24, 3]
-        self.beta_shape = [10]
-        self.trans_shape = [3]
-
-        self.pose = np.zeros(self.pose_shape)
-        self.beta = np.zeros(self.beta_shape)
-        self.trans = np.zeros(self.trans_shape)
-
-        self.verts = None
-        self.J = None
-        self.R = None
-
-        self.update()
-
-    def set_params(self, pose=None, beta=None, trans=None):
-        if pose is not None:
-            self.pose = pose
-        if beta is not None:
-            self.beta = beta
-        if trans is not None:
-            self.trans = trans
-        self.update()
-        return self.verts
-
-    def update(self):
-        v_shaped = self.shapedirs.dot(self.beta) + self.v_template  # how beta affect body shape
-        self.J = self.J_regressor.dot(v_shaped)  # joints location
-        pose_cube = self.pose.reshape((-1, 1, 3))
-        self.R = self.rodrigues(pose_cube)  # rotation matrix for each joint
-        I_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), (self.R.shape[0]-1, 3, 3))
-        lrotmin = (self.R[1:] - I_cube).ravel()
-        v_posed = v_shaped + self.posedirs.dot(lrotmin)  # how pose affect body shape in zero pose
-        G = np.empty((self.kintree_table.shape[1], 4, 4))  # world transformation of each joint
-        G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
-        for i in range(1, self.kintree_table.shape[1]):
-            G[i] = G[self.parent[i]].dot(
-                self.with_zeros(
-                    np.hstack(
-                        [self.R[i], ((self.J[i, :] - self.J[self.parent[i], :]).reshape([3, 1]))]
-                    )
-                )
-            )
-        # remove the transformation due to the rest pose
-        G = G - self.pack(
-            np.matmul(
-                G,
-                np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])
-            )
-        )
-        T = np.tensordot(self.weights, G, axes=[[1], [0]])  # transformation of each vertex
-        rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
-        v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3]
-        self.verts = v + self.trans.reshape([1, 3])
-
-    def rodrigues(self, r):
-        theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
-        # avoid zero divide
-        theta = np.maximum(theta, np.finfo(np.float64).tiny)
-        r_hat = r / theta
-        cos = np.cos(theta)
-        z_stick = np.zeros(theta.shape[0])
-        m = np.dstack([
-            z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1],
-            r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0],
-            -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick]
-        ).reshape([-1, 3, 3])
-        i_cube = np.broadcast_to(
-            np.expand_dims(np.eye(3), axis=0),
-            [theta.shape[0], 3, 3]
+  def __init__(self, model_path):
+    """
+    SMPL model.
+
+    Parameter:
+    ---------
+    model_path: Path to the SMPL model parameters, pre-processed by
+      `preprocess.py`.
+
+    """
+    with open(model_path, 'rb') as f:
+      params = pickle.load(f)
+
+    self.J_regressor = params['J_regressor']
+    self.weights = params['weights']
+    self.posedirs = params['posedirs']
+    self.v_template = params['v_template']
+    self.shapedirs = params['shapedirs']
+    self.faces = params['f']
+    self.kintree_table = params['kintree_table']
+
+    id_to_col = {
+      self.kintree_table[1, i]: i for i in range(self.kintree_table.shape[1])
+    }
+    self.parent = {
+      i: id_to_col[self.kintree_table[0, i]]
+      for i in range(1, self.kintree_table.shape[1])
+    }
+
+    self.pose_shape = [24, 3]
+    self.beta_shape = [10]
+    self.trans_shape = [3]
+
+    self.pose = np.zeros(self.pose_shape)
+    self.beta = np.zeros(self.beta_shape)
+    self.trans = np.zeros(self.trans_shape)
+
+    self.verts = None
+    self.J = None
+    self.R = None
+
+    self.update()
+
+  def set_params(self, pose=None, beta=None, trans=None):
+    """
+    Set pose, shape, and/or translation parameters of the SMPL model. Vertices
+    of the model will be updated and returned.
+
+    Parameters:
+    ---------
+    pose: Also known as 'theta', a [24, 3] matrix indicating child joint rotation
+      relative to parent joint. For the root joint it's the global orientation.
+      Represented in axis-angle format.
+
+    beta: Parameter for model shape. A vector of shape [10]. Coefficients for
+      PCA components. Only 10 components were released by MPI.
+
+    trans: Global translation of shape [3].
+
+    Return:
+    ------
+    Updated vertices.
+
+    """
+    if pose is not None:
+      self.pose = pose
+    if beta is not None:
+      self.beta = beta
+    if trans is not None:
+      self.trans = trans
+    self.update()
+    return self.verts
+
+  def update(self):
+    """
+    Called automatically when parameters are updated.
+
+    """
+    # how beta affect body shape
+    v_shaped = self.shapedirs.dot(self.beta) + self.v_template
+    # joints location
+    self.J = self.J_regressor.dot(v_shaped)
+    pose_cube = self.pose.reshape((-1, 1, 3))
+    # rotation matrix for each joint
+    self.R = self.rodrigues(pose_cube)
+    I_cube = np.broadcast_to(
+      np.expand_dims(np.eye(3), axis=0),
+      (self.R.shape[0]-1, 3, 3)
+    )
+    lrotmin = (self.R[1:] - I_cube).ravel()
+    # how pose affect body shape in zero pose
+    v_posed = v_shaped + self.posedirs.dot(lrotmin)
+    # world transformation of each joint
+    G = np.empty((self.kintree_table.shape[1], 4, 4))
+    G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
+    for i in range(1, self.kintree_table.shape[1]):
+      G[i] = G[self.parent[i]].dot(
+        self.with_zeros(
+          np.hstack(
+            [self.R[i], ((self.J[i, :] - self.J[self.parent[i], :]).reshape([3, 1]))]
+          )
         )
-        A = np.transpose(r_hat, axes=[0, 2, 1])
-        B = r_hat
-        dot = np.matmul(A, B)
-        R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
-        return R
-
-    def with_zeros(self, x):
-        return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
-
-    def pack(self, x):
-        return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
-
-    def save_to_obj(self, path):
-        with open(path, 'w') as fp:
-            for v in self.verts:
-                fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
-            for f in self.faces + 1:
-                fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
+      )
+    # remove the transformation due to the rest pose
+    G = G - self.pack(
+      np.matmul(
+        G,
+        np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])
+      )
+    )
+    # transformation of each vertex
+    T = np.tensordot(self.weights, G, axes=[[1], [0]])
+    rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
+    v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3]
+    self.verts = v + self.trans.reshape([1, 3])
+
r): + """ + Rodrigues' rotation formula that turns axis-angle vector into rotation + matrix in a batch-ed manner. + + Parameter: + ---------- + r: Axis-angle rotation vector of shape [batch_size, 1, 3]. + + Return: + ------- + Rotation matrix of shape [batch_size, 3, 3]. + + """ + theta = np.linalg.norm(r, axis=(1, 2), keepdims=True) + # avoid zero divide + theta = np.maximum(theta, np.finfo(np.float64).tiny) + r_hat = r / theta + cos = np.cos(theta) + z_stick = np.zeros(theta.shape[0]) + m = np.dstack([ + z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], + r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0], + -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick] + ).reshape([-1, 3, 3]) + i_cube = np.broadcast_to( + np.expand_dims(np.eye(3), axis=0), + [theta.shape[0], 3, 3] + ) + A = np.transpose(r_hat, axes=[0, 2, 1]) + B = r_hat + dot = np.matmul(A, B) + R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m + return R + + def with_zeros(self, x): + """ + Append a [0, 0, 0, 1] vector to a [3, 4] matrix. + + Parameter: + --------- + x: Matrix to be appended. + + Return: + ------ + Matrix after appending of shape [4,4] + + """ + return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]]))) + + def pack(self, x): + """ + Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched + manner. + + Parameter: + ---------- + x: Matrices to be appended of shape [batch_size, 4, 1] + + Return: + ------ + Matrix of shape [batch_size, 4, 4] after appending. + + """ + return np.dstack((np.zeros((x.shape[0], 4, 3)), x)) + + def save_to_obj(self, path): + """ + Save the SMPL model into .obj file. + + Parameter: + --------- + path: Path to save. + + """ + with open(path, 'w') as fp: + for v in self.verts: + fp.write('v %f %f %f\n' % (v[0], v[1], v[2])) + for f in self.faces + 1: + fp.write('f %d %d %d\n' % (f[0], f[1], f[2])) if __name__ == '__main__': - smpl = SMPLModel('./model.pkl') - np.random.seed(9608) - pose = (np.random.rand(*smpl.pose_shape) - 0.5) * 0.4 - beta = (np.random.rand(*smpl.beta_shape) - 0.5) * 0.06 - trans = np.zeros(smpl.trans_shape) - smpl.set_params(beta=beta, pose=pose, trans=trans) - smpl.save_to_obj('./smpl_np.obj') + smpl = SMPLModel('./model.pkl') + np.random.seed(9608) + pose = (np.random.rand(*smpl.pose_shape) - 0.5) * 0.4 + beta = (np.random.rand(*smpl.beta_shape) - 0.5) * 0.06 + trans = np.zeros(smpl.trans_shape) + smpl.set_params(beta=beta, pose=pose, trans=trans) + smpl.save_to_obj('./smpl_np.obj') diff --git a/smpl_tf.py b/smpl_tf.py index 676d2f4..e5b60a8 100644 --- a/smpl_tf.py +++ b/smpl_tf.py @@ -4,95 +4,190 @@ def rodrigues(r): - theta = tf.norm(r, axis=(1, 2), keepdims=True) - theta = tf.maximum(theta, np.finfo(np.float64).tiny) - r_hat = r / theta - cos = tf.cos(theta) - z_stick = tf.zeros(theta.get_shape().as_list()[0], dtype=tf.float64) - m = tf.stack( - (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick, - -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), axis=1) - m = tf.reshape(m, (-1, 3, 3)) - i_cube = tf.expand_dims(tf.eye(3, dtype=tf.float64), axis=0) + tf.zeros( - (theta.get_shape().as_list()[0], 3, 3), dtype=tf.float64) - A = tf.transpose(r_hat, (0, 2, 1)) - B = r_hat - dot = tf.matmul(A, B) - R = cos * i_cube + (1 - cos) * dot + tf.sin(theta) * m - return R + """ + Rodrigues' rotation formula that turns axis-angle tensor into rotation + matrix in a batch-ed manner. + + Parameter: + ---------- + r: Axis-angle rotation tensor of shape [batch_size, 1, 3]. + + Return: + ------- + Rotation matrix of shape [batch_size, 3, 3]. 
+ + """ + theta = tf.norm(r, axis=(1, 2), keepdims=True) + # avoid divide by zero + theta = tf.maximum(theta, np.finfo(np.float64).tiny) + r_hat = r / theta + cos = tf.cos(theta) + z_stick = tf.zeros(theta.get_shape().as_list()[0], dtype=tf.float64) + m = tf.stack( + (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick, + -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), axis=1) + m = tf.reshape(m, (-1, 3, 3)) + i_cube = tf.expand_dims(tf.eye(3, dtype=tf.float64), axis=0) + tf.zeros( + (theta.get_shape().as_list()[0], 3, 3), dtype=tf.float64) + A = tf.transpose(r_hat, (0, 2, 1)) + B = r_hat + dot = tf.matmul(A, B) + R = cos * i_cube + (1 - cos) * dot + tf.sin(theta) * m + return R def with_zeros(x): - return tf.concat((x, tf.constant([[0.0, 0.0, 0.0, 1.0]], dtype=tf.float64)), axis=0) + """ + Append a [0, 0, 0, 1] tensor to a [3, 4] tensor. + + Parameter: + --------- + x: Tensor to be appended. + + Return: + ------ + Tensor after appending of shape [4,4] + + """ + ret = tf.concat( + (x, tf.constant([[0.0, 0.0, 0.0, 1.0]], dtype=tf.float64)), + axis=0 + ) + return ret def pack(x): - return tf.concat((tf.zeros((x.get_shape().as_list()[0], 4, 3), dtype=tf.float64), x), axis=2) + """ + Append zero tensors of shape [4, 3] to a batch of [4, 1] shape tensor. + + Parameter: + ---------- + x: A tensor of shape [batch_size, 4, 1] + + Return: + ------ + A tensor of shape [batch_size, 4, 4] after appending. + + """ + ret = tf.concat( + (tf.zeros((x.get_shape().as_list()[0], 4, 3), dtype=tf.float64), x), + axis=2 + ) + return ret def smpl_model(model_path, betas, pose, trans, simplify=False): - with open(model_path, 'rb') as f: - params = pickle.load(f) - - J_regressor = tf.constant(np.array(params['J_regressor'].todense(), dtype=np.float64)) - weights = tf.constant(params['weights'], dtype=np.float64) - posedirs = tf.constant(params['posedirs'], dtype=np.float64) - v_template = tf.constant(params['v_template'], dtype=np.float64) - shapedirs = tf.constant(params['shapedirs'], dtype=np.float64) - f = params['f'] - - kintree_table = params['kintree_table'] - id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])} - parent = { - i: id_to_col[kintree_table[0, i]] - for i in range(1, kintree_table.shape[1]) - } - v_shaped = tf.tensordot(shapedirs, betas, axes=[[2], [0]]) + v_template - J = tf.matmul(J_regressor, v_shaped) - pose_cube = tf.reshape(pose, (-1, 1, 3)) - R_cube_big = rodrigues(pose_cube) - if simplify: - v_posed = v_shaped - else: - R_cube = R_cube_big[1:] - I_cube = tf.expand_dims(tf.eye(3, dtype=tf.float64), axis=0) + tf.zeros((R_cube.get_shape()[0], 3, 3), dtype=tf.float64) - lrotmin = tf.squeeze(tf.reshape((R_cube - I_cube), (-1, 1))) - v_posed = v_shaped + tf.tensordot(posedirs, lrotmin, axes=[[2], [0]]) - results = [] - results.append(with_zeros(tf.concat((R_cube_big[0], tf.reshape(J[0, :], (3, 1))), axis=1))) - for i in range(1, kintree_table.shape[1]): - results.append(tf.matmul(results[parent[i]], with_zeros(tf.concat((R_cube_big[i], tf.reshape(J[i, :] - J[parent[i], :], (3, 1))), axis=1)))) - stacked = tf.stack(results, axis=0) - results = stacked - pack(tf.matmul(stacked, tf.reshape(tf.concat((J, tf.zeros((24, 1), dtype=tf.float64)), axis=1), (24, 4, 1)))) - T = tf.tensordot(weights, results, axes=((1), (0))) - rest_shape_h = tf.concat((v_posed, tf.ones((v_posed.get_shape().as_list()[0], 1), dtype=tf.float64)), axis=1) - v = tf.matmul(T, tf.reshape(rest_shape_h, (-1, 4, 1))) - v = tf.reshape(v, (-1, 4))[:, :3] - result = v + 
tf.reshape(trans, (1, 3)) - return result, f + """ + Construct a compute graph that takes in parameters and outputs a tensor as + model vertices. Face indices are also returned as a numpy ndarray. + + Prameters: + --------- + pose: Also known as 'theta', a [24,3] tensor indicating child joint rotation + relative to parent joint. For root joint it's global orientation. + Represented in a axis-angle format. + + betas: Parameter for model shape. A tensor of shape [10] as coefficients of + PCA components. Only 10 components were released by SMPL author. + + trans: Global translation tensor of shape [3]. + + Return: + ------ + A tensor for vertices, and a numpy ndarray as face indices. + + """ + # For detailed comments see smpl_np.py + with open(model_path, 'rb') as f: + params = pickle.load(f) + + J_regressor = tf.constant( + np.array(params['J_regressor'].todense(), + dtype=np.float64) + ) + weights = tf.constant(params['weights'], dtype=np.float64) + posedirs = tf.constant(params['posedirs'], dtype=np.float64) + v_template = tf.constant(params['v_template'], dtype=np.float64) + shapedirs = tf.constant(params['shapedirs'], dtype=np.float64) + f = params['f'] + + kintree_table = params['kintree_table'] + id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])} + parent = { + i: id_to_col[kintree_table[0, i]] + for i in range(1, kintree_table.shape[1]) + } + v_shaped = tf.tensordot(shapedirs, betas, axes=[[2], [0]]) + v_template + J = tf.matmul(J_regressor, v_shaped) + pose_cube = tf.reshape(pose, (-1, 1, 3)) + R_cube_big = rodrigues(pose_cube) + if simplify: + v_posed = v_shaped + else: + R_cube = R_cube_big[1:] + I_cube = tf.expand_dims(tf.eye(3, dtype=tf.float64), axis=0) + \ + tf.zeros((R_cube.get_shape()[0], 3, 3), dtype=tf.float64) + lrotmin = tf.squeeze(tf.reshape((R_cube - I_cube), (-1, 1))) + v_posed = v_shaped + tf.tensordot(posedirs, lrotmin, axes=[[2], [0]]) + results = [] + results.append( + with_zeros(tf.concat((R_cube_big[0], tf.reshape(J[0, :], (3, 1))), axis=1)) + ) + for i in range(1, kintree_table.shape[1]): + results.append( + tf.matmul( + results[parent[i]], + with_zeros( + tf.concat( + (R_cube_big[i], tf.reshape(J[i, :] - J[parent[i], :], (3, 1))), + axis=1 + ) + ) + ) + ) + stacked = tf.stack(results, axis=0) + results = stacked - \ + pack( + tf.matmul( + stacked, + tf.reshape( + tf.concat((J, tf.zeros((24, 1), dtype=tf.float64)), axis=1), + (24, 4, 1) + ) + ) + ) + T = tf.tensordot(weights, results, axes=((1), (0))) + rest_shape_h = tf.concat( + (v_posed, tf.ones((v_posed.get_shape().as_list()[0], 1), dtype=tf.float64)), + axis=1 + ) + v = tf.matmul(T, tf.reshape(rest_shape_h, (-1, 4, 1))) + v = tf.reshape(v, (-1, 4))[:, :3] + result = v + tf.reshape(trans, (1, 3)) + return result, f if __name__ == '__main__': - pose_size = 72 - beta_size = 10 + pose_size = 72 + beta_size = 10 - np.random.seed(9608) - pose = (np.random.rand(pose_size) - 0.5) * 0.4 - betas = (np.random.rand(beta_size) - 0.5) * 0.06 - trans = np.zeros(3) + np.random.seed(9608) + pose = (np.random.rand(pose_size) - 0.5) * 0.4 + betas = (np.random.rand(beta_size) - 0.5) * 0.06 + trans = np.zeros(3) - pose = tf.constant(pose, dtype=tf.float64) - betas = tf.constant(betas, dtype=tf.float64) - trans = tf.constant(trans, dtype=tf.float64) + pose = tf.constant(pose, dtype=tf.float64) + betas = tf.constant(betas, dtype=tf.float64) + trans = tf.constant(trans, dtype=tf.float64) - output, faces = smpl_model('./model.pkl', betas, pose, trans, True) - sess = tf.Session() - result = sess.run(output) 
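`smpl_tf.py` is written against the TensorFlow 1.x `tf.Session` API. For anyone on TensorFlow 2.x who wants to try it without porting, one possible workaround is to go through the `tf.compat.v1` layer. This is an untested sketch; it assumes `model.pkl` exists and that `smpl_tf.py` is importable from the working directory.

```python
import numpy as np
import tensorflow as tf

# smpl_tf.py builds a TF1-style static graph, so switch off eager execution.
tf.compat.v1.disable_eager_execution()

import smpl_tf

np.random.seed(9608)
pose = tf.constant((np.random.rand(72) - 0.5) * 0.4, dtype=tf.float64)
betas = tf.constant((np.random.rand(10) - 0.5) * 0.06, dtype=tf.float64)
trans = tf.constant(np.zeros(3), dtype=tf.float64)

output, faces = smpl_tf.smpl_model('./model.pkl', betas, pose, trans)
with tf.compat.v1.Session() as sess:
  verts = sess.run(output)

print(verts.shape)  # expected (6890, 3) for the released SMPL models
```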
diff --git a/double_check.py b/test.py
similarity index 82%
rename from double_check.py
rename to test.py
index c6ce428..d1ac8e8 100644
--- a/double_check.py
+++ b/test.py
@@ -5,6 +5,18 @@
 def compute_diff(a, b):
+  """
+  Compute the max relative difference between ndarrays a and b element-wise.
+
+  Parameters:
+  ----------
+  a, b: ndarrays of the same shape to be compared.
+
+  Return:
+  ------
+  The max relative difference.
+
+  """
   return np.max(np.abs(a - b) / np.minimum(a, b))
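For context, `compute_diff` is the helper the renamed `test.py` uses to compare the two back-ends; the rest of `test.py` is not shown in this diff. Driving the helper by hand might look roughly like the sketch below (assuming a TensorFlow 1.x environment, `model.pkl` in the working directory, and both scripts importable). Note that the relative difference is taken element-wise, so vertex coordinates very close to zero can inflate the reported value.

```python
import numpy as np
import tensorflow as tf

from smpl_np import SMPLModel
import smpl_tf
from test import compute_diff

np.random.seed(9608)
pose = (np.random.rand(24, 3) - 0.5) * 0.4
beta = (np.random.rand(10) - 0.5) * 0.06
trans = np.zeros(3)

# Numpy back-end.
np_verts = SMPLModel('./model.pkl').set_params(pose=pose, beta=beta, trans=trans)

# TensorFlow back-end, fed the same parameters.
output, _ = smpl_tf.smpl_model('./model.pkl',
                               tf.constant(beta, dtype=tf.float64),
                               tf.constant(pose.ravel(), dtype=tf.float64),
                               tf.constant(trans, dtype=tf.float64))
with tf.Session() as sess:
  tf_verts = sess.run(output)

print(compute_diff(np_verts, tf_verts))  # max element-wise relative difference
```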