Testing compat TensorFlow 2 upgrade #19

Draft · wants to merge 3 commits into base: main
520 changes: 260 additions & 260 deletions 3DMM/scripts/test_basis_io.py

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions data_prepare/detect_3D_landmark.py
@@ -346,15 +346,15 @@ def detect_lmk86(origin_images_dir, mtcnn_dir, out_dir, names_list, pb_path):
     fopen = open(out_dir, "w")

     with tf.Graph().as_default():
-        graph_def = tf.GraphDef()
+        graph_def = tf.compat.v1.GraphDef()
         graph_file = pb_path
         with open(graph_file, "rb") as f:
             print("hello")
             graph_def.ParseFromString(f.read())
             tf.import_graph_def(graph_def, name="")

-        with tf.Session() as sess:
-            tf.global_variables_initializer().run()
+        with tf.compat.v1.Session() as sess:
+            tf.compat.v1.global_variables_initializer().run()
             image = sess.graph.get_tensor_by_name("lmk86pt_input:0")
             predict_lanmark = sess.graph.get_tensor_by_name("lmk86pt_output:0")
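Note on the hunk above: it is a mechanical rename of TF1 graph-mode symbols to their tf.compat.v1 equivalents, and the frozen-graph (.pb) workflow itself is unchanged. The global_variables_initializer().run() call that survives the diff is harmless but has nothing to initialize, since a frozen graph stores its weights as constants. A minimal sketch of the same workflow under TF2 compat, reusing the tensor names referenced in this file (everything else is illustrative, not code from this PR):

import tensorflow as tf

def load_frozen_graph(pb_path):
    """Parse a frozen .pb file and import it into a fresh graph."""
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.compat.v1.GraphDef()  # TF1 spelling: tf.GraphDef()
        with open(pb_path, "rb") as f:
            graph_def.ParseFromString(f.read())
        tf.compat.v1.import_graph_def(graph_def, name="")
    return graph

# Usage sketch (model path and input array are placeholders):
# graph = load_frozen_graph("lmk3D_86_model.pb")
# with tf.compat.v1.Session(graph=graph) as sess:
#     lmk = sess.run("lmk86pt_output:0", feed_dict={"lmk86pt_input:0": images})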

6 changes: 3 additions & 3 deletions data_prepare/detect_face_with_mtcnn.py
@@ -346,16 +346,16 @@ def detect_with_MTCNN(origin_images_dir, out_dir, pb_path, mode="no_depth"):
     factor = 0.709 # scale factor

     with tf.Graph().as_default():
-        graph_def = tf.GraphDef()
+        graph_def = tf.compat.v1.GraphDef()
         graph_file = pb_path
         with open(graph_file, "rb") as f:
             print("hello")
             graph_def.ParseFromString(f.read())
             tf.import_graph_def(graph_def, name="")

-        sess = tf.Session()
+        sess = tf.compat.v1.Session()
         with sess.as_default():
-            tf.global_variables_initializer().run()
+            tf.compat.v1.global_variables_initializer().run()
             pnet, rnet, onet = create_mtcnn_pb(sess)

     # find files
28 changes: 14 additions & 14 deletions data_prepare/run_data_preparation.py
@@ -60,17 +60,17 @@


 def detect_2Dlmk_all_imgs(graph_file, img_dir, lmk3D_txt_path, lmk2D_txt_path):
-    with tf.Graph().as_default():
-        graph_def = tf.GraphDef()
+    with tf.Graph().as_default(), tf.device('/device:XLA_CPU:0'):
+        graph_def = tf.compat.v1.GraphDef()
         graph_file = graph_file

         with open(graph_file, "rb") as f:
             print("hello")
             graph_def.ParseFromString(f.read())
             tf.import_graph_def(graph_def, name="")

-        with tf.Session() as sess:
-            tf.initialize_all_variables().run()
+        with tf.compat.v1.Session() as sess:
+            tf.compat.v1.initialize_all_variables().run()

             fopen = open(lmk2D_txt_path, "w")
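The rewritten with-statement above additionally pins the imported graph to '/device:XLA_CPU:0'. Whether that device string exists depends on the TensorFlow build and version (the dedicated XLA devices were deprecated and later dropped), so a quick check like the sketch below may be worth running before hard-coding it; this is a suggestion, not something the PR does:

import tensorflow as tf

# Print the logical devices the runtime actually exposes before pinning ops
# to a specific device string such as '/device:XLA_CPU:0'.
for dev in tf.config.list_logical_devices():
    print(dev.name, dev.device_type)  # e.g. '/device:CPU:0', '/device:GPU:0'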

@@ -155,17 +155,17 @@ def face_seg(graph_file, lmk3D_crop_txt_path, out_crop_dir, seg_dir):

     landmarks3D, images_name = load_landmark(lmk3D_crop_txt_path, 86)

-    with tf.Graph().as_default():
-        graph_def = tf.GraphDef()
+    with tf.Graph().as_default(), tf.device('/device:XLA_CPU:0'):
+        graph_def = tf.compat.v1.GraphDef()
         graph_file = graph_file

         with open(graph_file, "rb") as f:
             print("hello")
             graph_def.ParseFromString(f.read())
             tf.import_graph_def(graph_def, name="")

-        with tf.Session() as sess:
-            tf.initialize_all_variables().run()
+        with tf.compat.v1.Session() as sess:
+            tf.compat.v1.initialize_all_variables().run()
             count = 0
             for i in range(0, len(images_name)):
                 img_name = images_name[i]
@@ -204,14 +204,14 @@ def prepare_test_data_RGB(img_dir, out_dir):
     names_list = detect_face_with_mtcnn.detect_with_MTCNN(img_dir, mtcnn_dir, pb_path)

     print("start detect 86pt 3D lmk")
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     pb_path = os.path.join(FLAGS.pb_path, "lmk3D_86_model.pb")
     detect_3D_landmark.detect_lmk86(
         img_dir, mtcnn_dir, lmk3D_ori_txt_path, names_list, pb_path
     )

     print("start detect 68pt 2D lmk") # need to transfer RGB in the function
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     pb_path = os.path.join(FLAGS.pb_path, "lmk2D_68_model.pb")
     detect_2Dlmk_all_imgs(
         pb_path, img_dir, lmk3D_ori_txt_path, lmk2D_ori_txt_path
@@ -229,7 +229,7 @@ def prepare_test_data_RGB(img_dir, out_dir):
     )

     print("start face seg") # need to transfer RGB in the function
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     pb_path = os.path.join(FLAGS.pb_path, "faceseg_model.pb")
     face_seg(pb_path, lmk3D_crop_txt_path, crop_dir, seg_dir)
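The repeated tf.compat.v1.reset_default_graph() calls in this function exist because every stage (MTCNN, 3D landmarks, 2D landmarks, face segmentation) imports its frozen model into the shared default graph. A minimal sketch of what the reset buys, assuming graph mode is active; the names are illustrative only:

import tensorflow as tf

tf.compat.v1.disable_v2_behavior()  # placeholders below need graph mode

# Stage 1 adds ops to the default graph.
x = tf.compat.v1.placeholder(tf.float32, shape=[None], name="input")

# Without a reset, stage 2 would keep accumulating ops in the same graph
# (and silently get uniquified names such as "input_1").
tf.compat.v1.reset_default_graph()

# Stage 2 now starts from an empty default graph, so the name is free again.
y = tf.compat.v1.placeholder(tf.float32, shape=[None], name="input")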

@@ -277,14 +277,14 @@ def prepare_test_data_RGBD(img_dir, out_dir):
     )

     print("start detect 86pt 3D lmk")
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     pb_path = os.path.join(FLAGS.pb_path, "lmk3D_86_model.pb")
     detect_3D_landmark.detect_lmk86(
         img_dir, mtcnn_dir, lmk3D_ori_txt_path, names_list, pb_path
     )

     print("start detect 68pt 2D lmk") # need to transfer RGB in the function
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     pb_path = os.path.join(FLAGS.pb_path, "lmk2D_68_model.pb")
     detect_2Dlmk_all_imgs(
         pb_path, img_dir, lmk3D_ori_txt_path, lmk2D_ori_txt_path
@@ -302,7 +302,7 @@ def prepare_test_data_RGBD(img_dir, out_dir):
     )

     print("start face seg") # need to transfer RGB in the function
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     pb_path = os.path.join(FLAGS.pb_path, "faceseg_model.pb")
     face_seg(pb_path, lmk3D_crop_txt_path, crop_dir, seg_dir)

51 changes: 27 additions & 24 deletions optimization/rgb/run_RGB_opt.py
@@ -32,10 +32,13 @@
 import scipy.io as scio
 import cv2
 import tensorflow as tf
-import tensorflow.contrib.opt as tf_opt
+#import tensorflow.contrib.opt as tf_opt
 import os
 from absl import app, flags
 import sys
+tf.compat.v1.disable_eager_execution()
+tf.compat.v1.disable_v2_behavior()


 sys.path.append("../..")
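Two notes on this hunk. tensorflow.contrib was removed in TF2, so the commented-out tensorflow.contrib.opt import has no drop-in replacement; tf.compat.v1.train, tf.keras.optimizers, or TensorFlow Addons are the usual substitutes, though which one this script actually needs is not settled by the PR. Also, the two disable_* calls overlap, because disable_v2_behavior() already turns eager execution off. A sketch of the shorter preamble, as an assumption about intent rather than a required change:

import tensorflow as tf

# Run unconverted TF1 graph/session code on a TF2 installation:
# disable_v2_behavior() switches off eager execution and restores TF1-style
# variable and collection semantics, so a separate disable_eager_execution()
# call is redundant (though harmless).
tf.compat.v1.disable_v2_behavior()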

@@ -51,67 +54,67 @@
 def define_variable(num_of_img, imageH, imageW, para_shape_shape, para_tex_shape, info):

     # variable-trainable=False
-    image_batch = tf.get_variable(
+    image_batch = tf.compat.v1.get_variable(
         shape=[num_of_img, imageH, imageW, 3],
         dtype=tf.float32,
         name="ori_img",
         trainable=False,
-        initializer=tf.constant_initializer(info["img_list"] / 255.0),
+        initializer=tf.compat.v1.constant_initializer(info["img_list"] / 255.0),
     )

-    segmentation = tf.get_variable(
+    segmentation = tf.compat.v1.get_variable(
         shape=[num_of_img, imageH, imageW, 19],
         dtype=tf.float32,
         name="face_segmentation",
         trainable=False,
-        initializer=tf.constant_initializer(info["seg_list"]),
+        initializer=tf.compat.v1.constant_initializer(info["seg_list"]),
     )

-    lmk_86_3d_batch = tf.get_variable(
+    lmk_86_3d_batch = tf.compat.v1.get_variable(
         shape=[num_of_img, 86, 2],
         dtype=tf.float32,
         name="lmk_86_3d_batch",
         trainable=False,
-        initializer=tf.constant_initializer(info["lmk_list3D"]),
+        initializer=tf.compat.v1.constant_initializer(info["lmk_list3D"]),
     )

-    lmk_68_2d_batch = tf.get_variable(
+    lmk_68_2d_batch = tf.compat.v1.get_variable(
         shape=[num_of_img, 68, 2],
         dtype=tf.float32,
         name="lmk_68_2d_batch",
         trainable=False,
-        initializer=tf.constant_initializer(info["lmk_list2D"]),
+        initializer=tf.compat.v1.constant_initializer(info["lmk_list2D"]),
     )

-    K = tf.get_variable(
+    K = tf.compat.v1.get_variable(
         shape=[1, 3, 3],
         dtype=tf.float32,
         name="K",
         trainable=False,
-        initializer=tf.constant_initializer(info["K"]),
+        initializer=tf.compat.v1.constant_initializer(info["K"]),
     )

     # variable-trainable=True
-    pose6 = tf.get_variable(
+    pose6 = tf.compat.v1.get_variable(
         shape=[num_of_img, 6, 1],
         dtype=tf.float32,
         name="para_pose6",
         trainable=True,
-        initializer=tf.constant_initializer(info["se3_list"]),
+        initializer=tf.compat.v1.constant_initializer(info["se3_list"]),
     )

-    para_shape = tf.get_variable(
+    para_shape = tf.compat.v1.get_variable(
         shape=[1, para_shape_shape], dtype=tf.float32, name="para_shape", trainable=True
     )

-    para_tex = tf.get_variable(
+    para_tex = tf.compat.v1.get_variable(
         shape=[1, para_tex_shape], dtype=tf.float32, name="para_tex", trainable=True
     )

-    para_illum = tf.get_variable(
+    para_illum = tf.compat.v1.get_variable(
         shape=[num_of_img, 27],
         dtype=tf.float32,
-        initializer=tf.zeros_initializer(),
+        initializer=tf.compat.v1.zeros_initializer(),
         name="para_illum",
         trainable=True,
     )
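The hunk above is again a one-for-one rename: tf.get_variable, tf.constant_initializer, and tf.zeros_initializer become their tf.compat.v1 counterparts. Worth flagging is that tf.compat.v1.get_variable only works while graph mode is active, which is what the disable_* calls added at the top of this file provide. A self-contained sketch of the pattern; the shape and initial value are placeholders, not values from this repository:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()  # get_variable requires graph mode

init_value = np.eye(3, dtype=np.float32)[None]  # placeholder value, shape [1, 3, 3]
K = tf.compat.v1.get_variable(
    name="K",
    shape=[1, 3, 3],
    dtype=tf.float32,
    trainable=False,
    initializer=tf.compat.v1.constant_initializer(init_value),
)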
@@ -164,12 +167,12 @@ def build_RGB_opt_graph(var_list, basis3dmm, imageH, imageW):
     # optimizer
     global_step = tf.Variable(0, name="global_step_train", trainable=False)
     learning_rate = tf.maximum(
-        tf.train.exponential_decay(
+        tf.compat.v1.train.exponential_decay(
             FLAGS.learning_rate, global_step, FLAGS.lr_decay_step, FLAGS.lr_decay_rate
         ),
         FLAGS.min_learning_rate,
     )
-    optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
+    optim = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)

     gvs_illum = optim.compute_gradients(tot_loss_illum)
     gvs = optim.compute_gradients(tot_loss)
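tf.train.exponential_decay and tf.train.AdamOptimizer both live under tf.compat.v1.train in TF2; the native equivalents would be tf.keras.optimizers.schedules.ExponentialDecay plus tf.keras.optimizers.Adam, but that is a larger rewrite than this compat pass attempts. A sketch of the compat form shown above, with illustrative constants standing in for the FLAGS values:

import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

global_step = tf.Variable(0, name="global_step_train", trainable=False)
learning_rate = tf.maximum(
    tf.compat.v1.train.exponential_decay(
        learning_rate=0.05,  # FLAGS.learning_rate in the real script
        global_step=global_step,
        decay_steps=20,      # FLAGS.lr_decay_step
        decay_rate=0.9,      # FLAGS.lr_decay_rate
    ),
    1e-4,                    # FLAGS.min_learning_rate
)
optim = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)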
@@ -241,20 +244,20 @@ def RGB_opt(_):
     out_list = build_RGB_opt_graph(var_list, basis3dmm, imageH, imageW)

     # summary_op
-    summary_op = tf.summary.merge_all()
-    summary_writer = tf.summary.FileWriter(FLAGS.summary_dir)
+    summary_op = tf.compat.v1.summary.merge_all()
+    summary_writer = tf.compat.v1.summary.FileWriter(FLAGS.summary_dir)

     if os.path.exists(FLAGS.summary_dir) is False:
         os.makedirs(FLAGS.summary_dir)
     if os.path.exists(FLAGS.out_dir) is False:
         os.makedirs(FLAGS.out_dir)

     # start opt
-    config = tf.ConfigProto()
+    config = tf.compat.v1.ConfigProto()
     # config.gpu_options.per_process_gpu_memory_fraction=0.5
     config.gpu_options.allow_growth = True
-    with tf.Session(config=config) as sess:
-        sess.run(tf.global_variables_initializer())
+    with tf.compat.v1.Session(config=config) as sess:
+        sess.run(tf.compat.v1.global_variables_initializer())

         import time
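tf.summary.FileWriter and tf.ConfigProto exist only under tf.compat.v1 in TF2 (the native summary API writes through tf.summary.create_file_writer instead). A sketch of the compat session setup used above, with a placeholder log directory:

import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

summary_op = tf.compat.v1.summary.merge_all()  # returns None if no summaries were added
summary_writer = tf.compat.v1.summary.FileWriter("/tmp/summary_dir")  # placeholder path

config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand instead of all at once
with tf.compat.v1.Session(config=config) as sess:
    sess.run(tf.compat.v1.global_variables_initializer())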

6 changes: 3 additions & 3 deletions optimization/rgbd/RGBD_utils/PoseTools_TF.py
@@ -106,13 +106,13 @@ def tf_Rodrigues(a, b, c, theta):

         rr0 = (
             cos_theta * I
-            + (1 - cos_theta) * (n * tf.transpose(n))
+            + (1 - cos_theta) * (n * tf.transpose(a=n))
             + sin_theta * n_hat
         )
         return rr0

     rr = tf.cond(
-        tf.squeeze(theta) > 1e-3,
+        pred=tf.squeeze(theta) > 1e-3,
         true_fn=lambda: tf_Rodrigues(a, b, c, theta),
         false_fn=lambda: tf.eye(3, 3),
     )
@@ -125,7 +125,7 @@ def tf_trans_inverse(rr, tt):
     # in/out : n* 3 * 4
     # rr = TWC[0:3, 0:3]
     # tt = TWC[0:3, 3:]
-    rr_1 = tf.transpose(rr, [1, 0])
+    rr_1 = tf.transpose(a=rr, perm=[1, 0])
     tt_1 = -1 * tf.matmul(rr_1, tt)
     return rr_1, tt_1
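The two hunks in this file are keyword-argument renames rather than behaviour changes: in the TF2 API, tf.transpose takes a and perm, and tf.cond's condition parameter is named pred (the keyword spelling the tf_upgrade_v2 tool tends to emit; the positional calls would also still run). A tiny eager-mode sketch of both:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
xt = tf.transpose(a=x, perm=[1, 0])  # same result as tf.transpose(x, [1, 0])

theta = tf.constant(0.5)
r = tf.cond(
    pred=theta > 1e-3,
    true_fn=lambda: xt,
    false_fn=lambda: tf.eye(2),
)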

30 changes: 16 additions & 14 deletions optimization/rgbd/step4A_prefit_Albedo_Global.py
@@ -37,6 +37,8 @@
 from PIL import Image
 import sys
 sys.path.append('../..')
+tf.compat.v1.disable_eager_execution()
+tf.compat.v1.disable_v2_behavior()

 from utils.basis import load_3dmm_basis, get_region_uv_texture, construct
 from utils.misc import tf_blend_uv, blend_uv
@@ -124,14 +126,14 @@ def main(_):
     imageH = img_list[0].shape[0]
     imageW = img_list[0].shape[1]
     assert(img_list[0].shape[0] == img_list[1].shape[0] and img_list[0].shape[1] == img_list[2].shape[1])
-    image_mid_batch = tf.placeholder(dtype=tf.float32, shape=[1, imageH, imageW, 3], name='image_mid')
-    image_left_batch = tf.placeholder(dtype=tf.float32, shape=[1, imageH, imageW, 3], name='image_left')
-    image_right_batch = tf.placeholder(dtype=tf.float32, shape=[1, imageH, imageW, 3], name='image_right')
+    image_mid_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, imageH, imageW, 3], name='image_mid')
+    image_left_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, imageH, imageW, 3], name='image_left')
+    image_right_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, imageH, imageW, 3], name='image_right')

     NV = basis3dmm['basis_shape'].shape[1] // 3
-    proj_xyz_mid_batch = tf.placeholder(dtype=tf.float32, shape=[1, NV, 3], name='proj_xyz_mid')
-    proj_xyz_left_batch = tf.placeholder(dtype=tf.float32, shape=[1, NV, 3], name='proj_xyz_left')
-    proj_xyz_right_batch = tf.placeholder(dtype=tf.float32, shape=[1, NV, 3], name='proj_xyz_right')
+    proj_xyz_mid_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, NV, 3], name='proj_xyz_mid')
+    proj_xyz_left_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, NV, 3], name='proj_xyz_left')
+    proj_xyz_right_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, NV, 3], name='proj_xyz_right')

     ver_normals_mid_batch, _ = Projector.get_ver_norm(proj_xyz_mid_batch, basis3dmm['tri'], 'normal_mid')
     ver_normals_left_batch, _ = Projector.get_ver_norm(proj_xyz_left_batch, basis3dmm['tri'], 'normal_left')
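tf.placeholder is only available as tf.compat.v1.placeholder in TF2 and needs graph mode, which the disable_* calls added at the top of this file turn on. A minimal sketch of the placeholder/feed pattern; the shape and name are illustrative and much smaller than the real image batches:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

image_batch = tf.compat.v1.placeholder(
    dtype=tf.float32, shape=[1, 8, 8, 3], name="image_mid"
)
scaled = image_batch / 255.0

with tf.compat.v1.Session() as sess:
    out = sess.run(scaled, feed_dict={image_batch: np.ones([1, 8, 8, 3], np.float32)})
    print(out.shape)  # (1, 8, 8, 3)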
@@ -179,17 +181,17 @@ def main(_):
     #------------------------------------------------------------------------------------------
     # build fitting graph
     uv_bases = basis3dmm['uv']
-    para_tex = tf.get_variable(
+    para_tex = tf.compat.v1.get_variable(
         shape=[1,uv_bases['basis'].shape[0]],
-        initializer=tf.zeros_initializer(),
+        initializer=tf.compat.v1.zeros_initializer(),
         name='para_tex'
     )

     uv_rgb, uv_mask = get_region_uv_texture(uv_bases, para_tex)
     print("uv_rgb: ", uv_rgb.shape )

     # build fitting loss
-    input_uv512_batch = tf.placeholder(dtype=tf.float32, shape=[1, 512, 512, 3], name='gt_uv')
+    input_uv512_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, 512, 512, 3], name='gt_uv')
     tot_loss = 0.
     loss_str = 'total:{}'
     if FLAGS.photo_weight > 0:
@@ -206,16 +208,16 @@
         uv_reg_tex_loss = Losses.reg_loss(para_tex)
         tot_loss = tot_loss + uv_reg_tex_loss * FLAGS.uv_reg_tex_weight
         loss_str = loss_str + '; reg:{}'
-    optim = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
+    optim = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
     train_op = optim.minimize(tot_loss)


-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:

         if FLAGS.write_graph:
-            tf.train.write_graph(sess.graph_def, '', FLAGS.pb_path, as_text=True)
+            tf.io.write_graph(sess.graph_def, '', FLAGS.pb_path, as_text=True)
             exit()
-        sess.run(tf.global_variables_initializer())
+        sess.run(tf.compat.v1.global_variables_initializer())

         start_time = time.time()
         uv_extract, o_uv_left_batch, o_uv_mid_batch, o_uv_right_batch, o_mask_right_batch= \
@@ -305,4 +307,4 @@ def main(_):
     flags.DEFINE_float('learning_rate', 0.1, 'string : path for 3dmm')
     flags.DEFINE_string('GPU_NO', '7', 'which GPU')

-    tf.app.run(main)
+    tf.compat.v1.app.run(main)
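tf.compat.v1.app.run(main) still works, but since this file already imports app from absl, absl's own entry point plays the same role without the compat shim; the sketch below is a suggestion, not a change made by this PR (the flag is illustrative):

from absl import app, flags

flags.DEFINE_float("learning_rate", 0.1, "initial learning rate")  # illustrative flag

def main(_):
    print(flags.FLAGS.learning_rate)

if __name__ == "__main__":
    app.run(main)  # fills the role of tf.compat.v1.app.run(main)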