forked from miangoleh/dollyzoom
-
Notifications
You must be signed in to change notification settings - Fork 0
/
autozoom.py
123 lines (93 loc) · 3.33 KB
/
autozoom.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
#!/usr/bin/env python
import torch
import torchvision
import base64
import cupy
import cv2
import flask
import getopt
import gevent
import gevent.pywsgi
import glob
import h5py
import io
import math
import moviepy
import moviepy.editor
import numpy
import os
import random
import re
import scipy
import scipy.io
import shutil
import sys
import tempfile
import time
import urllib
import zipfile
import matplotlib.pyplot as plt
def showImage(img):
    """Display an OpenCV BGR image in a matplotlib window (converts to RGB first)."""
    npyRgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(npyRgb)
    plt.show()
##########################################################

# Require at least PyTorch 1.2.0. Compare (major, minor) as an integer tuple
# instead of concatenating digit strings (the original '12' comparison is
# fragile once version components have different digit counts); strip any
# local-version suffix such as '+cu118' before parsing.
assert tuple(int(strPart) for strPart in torch.__version__.split('+')[0].split('.')[0:2]) >= (1, 2) # requires at least pytorch version 1.2.0

torch.set_grad_enabled(False) # make sure to not compute gradients for computational performance

torch.backends.cudnn.enabled = True # make sure to use cudnn for computational performance

##########################################################
objCommon = {}  # shared state dict read/written by the exec'd helper scripts below
# Upstream 3d-ken-burns convention: helper scripts are exec'd into this module's
# global namespace so they can share objCommon and each other's definitions.
# NOTE(review): exec of local files runs arbitrary code; fine for trusted repo
# files, but never point these paths at untrusted input.
exec(open('./common.py', 'r').read())
# exec(open('./models/disparity_estimation.py', 'r').read())
# exec(open('./models/disparity_adjustment.py', 'r').read())
# exec(open('./models/disparity_refinement.py', 'r').read())
# The fork switched these three entry points to regular imports from common.py;
# the disparity model scripts above are presumably loaded inside common.py now.
from common import process_load
from common import process_autozoom
from common import process_kenburns
# The inpainting model is still exec'd rather than imported.
exec(open('./models/pointcloud_inpainting.py', 'r').read())
##########################################################
arguments_strIn = './images/mahdi.jpg'  # default input image
arguments_strOut = './mahdi_test2.mp4'  # default output video

# The accepted long-option list is synthesized from argv itself (every other
# token, minus its leading '--', with '=' appended so getopt expects a value);
# only --in and --out are actually consumed.
objOptions, _ = getopt.getopt(sys.argv[1:], '', [strParameter[2:] + '=' for strParameter in sys.argv[1::2]])
for strOption, strArgument in objOptions:
    if strOption == '--in' and strArgument != '':
        arguments_strIn = strArgument  # path to the input image
    if strOption == '--out' and strArgument != '':
        arguments_strOut = strArgument  # path to where the output should be stored
# end
##########################################################
if __name__ == '__main__':
	npyImage = cv2.imread(filename=arguments_strIn, flags=cv2.IMREAD_COLOR)

	# cv2.imread returns None (no exception) for a missing or unreadable file;
	# fail fast with a clear message instead of an AttributeError on .shape.
	if npyImage is None:
		raise SystemExit('could not read input image: ' + arguments_strIn)

	fltZoom = 1.25  # single zoom factor shared by autozoom and kenburns below

	# Resize so the longer side is at most 1024 px, preserving aspect ratio.
	intWidth = npyImage.shape[1]
	intHeight = npyImage.shape[0]

	fltRatio = float(intWidth) / float(intHeight)

	intWidth = min(int(1024 * fltRatio), 1024)
	intHeight = min(int(1024 / fltRatio), 1024)

	npyImage = cv2.resize(src=npyImage, dsize=(intWidth, intHeight), fx=0.0, fy=0.0, interpolation=cv2.INTER_AREA)

	process_load(npyImage, {})

	# Start from a centered crop covering 97% of the frame.
	objFrom = {
		'fltCenterU': intWidth / 2.0,
		'fltCenterV': intHeight / 2.0,
		'intCropWidth': int(math.floor(0.97 * intWidth)),
		'intCropHeight': int(math.floor(0.97 * intHeight))
	}

	# Let the depth-aware autozoom choose the end-of-zoom crop.
	objTo = process_autozoom({
		'fltShift': 100.0,
		'fltZoom': fltZoom,
		'objFrom': objFrom
	})

	# Render 75 interpolated frames between the two crops.
	npyResult = process_kenburns({
		'fltSteps': numpy.linspace(0.0, 1.0, 75).tolist(),
		'objFrom': objFrom,
		'objTo': objTo,
		'process_kenburns': True,
		'fltZoom': fltZoom
	})

	# Frames are BGR (OpenCV); [:, :, ::-1] flips channel order to RGB for
	# moviepy. The reversed sequence (minus the duplicated endpoint) is
	# appended so the clip plays forward then backward in a ping-pong loop.
	moviepy.editor.ImageSequenceClip(
		sequence=[npyFrame[:, :, ::-1] for npyFrame in npyResult + list(reversed(npyResult))[1:]],
		fps=25).write_videofile(arguments_strOut)
# end