test_video.py
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
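# Test script for video synthesis: build the test dataset and model from the
# parsed options, generate the clip frame by frame while feeding each generated
# frame back in as the "previous frame" input for the next step, then dump the
# individual frames as PNGs and write the assembled .avi clip.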
import os
from collections import OrderedDict
from options.test_options import TestOptions
from data.custom_dataset_data_loader import CreateDataset
from models.models import create_model
import util.util as util
import torch
from imageio import get_writer
from skimage.io import imsave
import numpy as np
from tqdm import tqdm

opt = TestOptions().parse(save=False)
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batchSize = 1 # test code only supports batchSize = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
dataset = CreateDataset(opt)

# test
model = create_model(opt)
if opt.verbose:
    print(model)

# test the whole video sequence
# 20181009: the real first frame is used as input only when opt.use_first_frame is set
data = dataset[0]

if opt.use_first_frame:
    # seed the recurrence with the ground-truth first frame and save it for reference
    prev_frame = data['image']
    start_from = 1
    imsave('results/ref.png', util.tensor2im(prev_frame))
    generated = [util.tensor2im(prev_frame)]
else:
    # start from an all-zero "previous frame"
    prev_frame = torch.zeros_like(data['image'])
    start_from = 0
    generated = []

for i in tqdm(range(start_from, dataset.clip_length)):
    label = data['label'][i:i+1]
    inst = None if opt.no_instance else data['inst'][i:i+1]
    # synthesize the current frame from its label map, conditioned on the previous frame
    cur_frame = model.inference(label, inst, torch.unsqueeze(prev_frame, dim=0))
    # feed the generated frame back in as the next step's "previous frame"
    prev_frame = cur_frame.data[0]
    imsave('./datasets/cardio_dance_test/test_sync/{:05d}.png'.format(i), util.tensor2im(prev_frame))
    generated.append(util.tensor2im(prev_frame))

# write the generated frames out as a 25 fps video clip
result_dir = os.path.join(opt.results_dir, opt.name, opt.which_epoch)
os.makedirs(result_dir, exist_ok=True)

clip_name = 'test_clip_ref.avi' if opt.use_first_frame else 'test_clip.avi'
with get_writer(os.path.join(result_dir, clip_name), fps=25) as writer:
    for im in generated:
        writer.append_data(im)
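
# Example invocation (a hypothetical sketch, not a documented command: --name and
# --which_epoch are standard pix2pixHD-style TestOptions flags, --use_first_frame
# is assumed to be defined by this repo's options, and the experiment name below
# is made up for illustration):
#   python test_video.py --name cardio_dance --which_epoch latest --use_first_frame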