diff --git a/frame_interpolation/rife/LICENSE b/frame_interpolation/rife/LICENSE
new file mode 100644
index 000000000..1c2b6ab57
--- /dev/null
+++ b/frame_interpolation/rife/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Megvii Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/frame_interpolation/rife/README.md b/frame_interpolation/rife/README.md
new file mode 100644
index 000000000..f8a70c8f8
--- /dev/null
+++ b/frame_interpolation/rife/README.md
@@ -0,0 +1,59 @@
+# Real-Time Intermediate Flow Estimation for Video Frame Interpolation
+
+## Input
+
+
+
+(Image from https://drive.google.com/file/d/1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc/view?usp=sharing)
+
+## Output
+
+
+
+## Usage
+
+Automatically downloads the onnx and prototxt files on the first run.
+It is necessary to be connected to the Internet while downloading.
+
+For the sample images,
+```bash
+$ python3 rife.py
+```
+
+If you want to specify the input images, put the first image path after the `--input` option and the second image path after the `--input2` option.
+You can use the `--savepath` option to change the name of the output file.
+```bash
+$ python3 rife.py --input IMAGE_PATH1 --input2 IMAGE_PATH2 --savepath SAVE_IMAGE_PATH
+```
+
+The `--input` option can also specify the directory path where the images are located.
+```bash
+$ python3 rife.py --input DIR_PATH
+```
+
+By adding the `--video` option, you can use a video as input.
+If you pass `0` as VIDEO_PATH, the webcam is used as input instead of a video file.
+```bash
+$ python3 rife.py --video VIDEO_PATH --savepath SAVE_VIDEO_PATH
+```
+
+For 4X interpolation, specify `--exp 2` (the default is `--exp 1`, which gives 2X interpolation).
+```bash
+$ python3 rife.py --exp 2
+```
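+
+In general, the interpolation factor is `2**exp`: the model is applied recursively, so `2**exp - 1` intermediate frames are generated between each pair of input images. For example, for 8X interpolation:
+```bash
+$ python3 rife.py --exp 3
+```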
+
+## Reference
+
+- [ECCV2022-RIFE](https://github.com/megvii-research/ECCV2022-RIFE)
+
+## Framework
+
+PyTorch
+
+## Model Format
+
+ONNX opset=16
+
+## Netron
+
+[RIFE_HDv3.onnx.prototxt](https://netron.app/?url=https://storage.googleapis.com/ailia-models/rife/RIFE_HDv3.onnx.prototxt)
diff --git a/frame_interpolation/rife/imgs/000001.png b/frame_interpolation/rife/imgs/000001.png
new file mode 100644
index 000000000..c770217ae
Binary files /dev/null and b/frame_interpolation/rife/imgs/000001.png differ
diff --git a/frame_interpolation/rife/imgs/000002.png b/frame_interpolation/rife/imgs/000002.png
new file mode 100644
index 000000000..94afe5ae2
Binary files /dev/null and b/frame_interpolation/rife/imgs/000002.png differ
diff --git a/frame_interpolation/rife/imgs/000003.png b/frame_interpolation/rife/imgs/000003.png
new file mode 100644
index 000000000..c659fff60
Binary files /dev/null and b/frame_interpolation/rife/imgs/000003.png differ
diff --git a/frame_interpolation/rife/imgs_results/output_001.png b/frame_interpolation/rife/imgs_results/output_001.png
new file mode 100644
index 000000000..94ecac23e
Binary files /dev/null and b/frame_interpolation/rife/imgs_results/output_001.png differ
diff --git a/frame_interpolation/rife/rife.py b/frame_interpolation/rife/rife.py
index 834ada09c..4a6481675 100644
--- a/frame_interpolation/rife/rife.py
+++ b/frame_interpolation/rife/rife.py
@@ -23,13 +23,11 @@
# Parameters
# ======================
-WEIGHT_2X_PATH = 'RIFE_HDv3_2X.onnx'
-MODEL_2X_PATH = 'RIFE_HDv3_2X.onnx.prototxt'
-WEIGHT_4X_PATH = 'RIFE_HDv3_4X.onnx'
-MODEL_4X_PATH = 'RIFE_HDv3_4X.onnx.prototxt'
+WEIGHT_PATH = 'RIFE_HDv3.onnx'
+MODEL_PATH = 'RIFE_HDv3.onnx.prototxt'
REMOTE_PATH = 'https://storage.googleapis.com/ailia-models/rife/'
-IMAGE_PATH = 'photos'
+IMAGE_PATH = 'imgs'
SAVE_IMAGE_PATH = 'output.png'
NAME_EXT = os.path.splitext(SAVE_IMAGE_PATH)
@@ -46,8 +44,8 @@
help='The second input image path.'
)
parser.add_argument(
- '-m', '--model_type', default='2x', choices=('2x', '4x'),
- help='model type'
+ '--exp', type=int, default=1,
+    help='interpolation exponent: 2**exp - 1 intermediate frames are generated (2**exp X output)'
)
parser.add_argument(
'--onnx',
@@ -132,8 +130,8 @@ def make_inference(net, img1, img2, n):
if n == 1:
return [mid_img]
- first_half = make_inference(img1, mid_img, n=n // 2)
- second_half = make_inference(mid_img, img2, n=n // 2)
+ first_half = make_inference(net, img1, mid_img, n=n // 2)
+ second_half = make_inference(net, mid_img, img2, n=n // 2)
if n % 2:
return [*first_half, mid_img, *second_half]
else:
@@ -142,7 +140,7 @@ def make_inference(net, img1, img2, n):
def recognize_from_image(net):
inputs = args.input
-
+ exp = args.exp
copy_img = True
# Load images
@@ -170,7 +168,7 @@ def recognize_from_image(net):
total_time_estimation = 0
for i in range(args.benchmark_count):
start = int(round(time.time() * 1000))
- out_img = predict(net, img1, img2)
+ mid_img = predict(net, img1, img2)
end = int(round(time.time() * 1000))
estimation_time = (end - start)
@@ -181,12 +179,10 @@ def recognize_from_image(net):
logger.info(f'\taverage time estimation {total_time_estimation / (args.benchmark_count - 1)} ms')
- save_file = "%s_%s%s" % (NAME_EXT[0], no, NAME_EXT[1])
- save_path = get_savepath(args.savepath, save_file, post_fix='', ext='.png')
- logger.info(f'saved at : {save_path}')
- cv2.imwrite(save_path, out_img)
+ copy_img = False
+ no = img_save(no, mid_img=mid_img)
else:
- output = make_inference(net, img1, img2, 1)
+ output = make_inference(net, img1, img2, 2 ** exp - 1)
if copy_img:
no = img_save(no, img_path=image_paths[0])
@@ -278,12 +274,6 @@ def recognize_from_video(net):
def main():
- model_dic = {
- '2x': (WEIGHT_2X_PATH, MODEL_2X_PATH),
- '4x': (WEIGHT_4X_PATH, MODEL_4X_PATH),
- }
- WEIGHT_PATH, MODEL_PATH = model_dic[args.model_type]
-
# model files check and download
check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)
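
For reference, here is a minimal standalone sketch of the recursion that `make_inference` implements and that the `2 ** exp - 1` call relies on: each call asks the model for the temporal midpoint of two frames and then recurses into both halves, so passing `n = 2**exp - 1` yields that many evenly spaced intermediate frames (2X output for `--exp 1`, 4X for `--exp 2`, and so on). `predict_midpoint` below is a hypothetical stand-in for the real `predict(net, img1, img2)` ONNX call; it simply blends the two frames so the sketch runs without any model weights.

```python
import numpy as np


def predict_midpoint(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:
    # Stand-in for the RIFE model call: the real predict() runs the ONNX
    # network; here we just average the two frames so the sketch is runnable.
    return (img1.astype(np.float32) + img2.astype(np.float32)) / 2


def make_inference(img1, img2, n):
    # Return n intermediate frames between img1 and img2 by recursive bisection.
    mid_img = predict_midpoint(img1, img2)
    if n == 1:
        return [mid_img]
    first_half = make_inference(img1, mid_img, n=n // 2)
    second_half = make_inference(mid_img, img2, n=n // 2)
    if n % 2:
        return [*first_half, mid_img, *second_half]
    return [*first_half, *second_half]


if __name__ == '__main__':
    a = np.zeros((2, 2), dtype=np.float32)
    b = np.full((2, 2), 255, dtype=np.float32)
    for exp in (1, 2, 3):
        frames = make_inference(a, b, 2 ** exp - 1)
        # exp=1 -> 1 frame (2X), exp=2 -> 3 frames (4X), exp=3 -> 7 frames (8X)
        print(f'exp={exp}: {len(frames)} intermediate frame(s)')
```

The sketch also makes the fix in this diff easy to see: each recursive call needs the network handle, which the previous code dropped when `make_inference` called itself.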