From f4bf0229ccd1ff4c8843728464a2ddfb7854dec0 Mon Sep 17 00:00:00 2001
From: Gremlinflat
Date: Wed, 8 Mar 2023 14:52:55 +0700
Subject: [PATCH 1/3] feat: add custom backend device selection (auto, cuda, mps, cpu)

---
 .gitignore              |  4 ++++
 inference_realesrgan.py | 16 ++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/.gitignore b/.gitignore
index bb86ed0fd..91c6005f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -138,3 +138,7 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+# VSCode
+.vscode/
+.vscode/*
\ No newline at end of file
diff --git a/inference_realesrgan.py b/inference_realesrgan.py
index 0a8cc43ad..bfc52692c 100644
--- a/inference_realesrgan.py
+++ b/inference_realesrgan.py
@@ -8,6 +8,8 @@
 from realesrgan import RealESRGANer
 from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+from torch.cuda import is_available as cudaIsAvailable
+from torch.backends.mps import is_available as mpsIsAvailable
 
 
 def main():
     """Inference demo for Real-ESRGAN.
@@ -52,6 +54,8 @@
     parser.add_argument(
         '-g', '--gpu-id', type=int, default=None, help='gpu device to use (default=None) can be 0,1,2 for multi-gpu')
+    parser.add_argument('--backend_type', type=str, default='auto', choices=['auto', 'cuda', 'cpu', 'mps'], help='backend type. Options: auto(cuda-cpu) | cuda | cpu | mps')
+
     args = parser.parse_args()
 
     # determine models according to model names
@@ -103,6 +107,16 @@
         model_path = [model_path, wdn_model_path]
         dni_weight = [args.denoise_strength, 1 - args.denoise_strength]
 
+    # determine backend type (cpu, cuda, mps)
+    if args.backend_type == 'auto':
+        backend_type = 'cuda' if cudaIsAvailable() else 'cpu'
+    elif args.backend_type == 'cuda' and cudaIsAvailable():
+        backend_type = 'cuda'
+    elif args.backend_type == 'mps' and mpsIsAvailable():
+        backend_type = 'mps'
+    else:
+        backend_type = 'cpu'
+
     # restorer
     upsampler = RealESRGANer(
         scale=netscale,
@@ -113,6 +127,7 @@
         tile_pad=args.tile_pad,
         pre_pad=args.pre_pad,
         half=not args.fp32,
+        device=backend_type,
         gpu_id=args.gpu_id)
 
     if args.face_enhance:  # Use GFPGAN for face enhancement
@@ -122,6 +137,7 @@
             upscale=args.outscale,
             arch='clean',
             channel_multiplier=2,
+            device='cpu',  # <--- MPS is not supported yet, crashes at runtime. TODO: FIX THIS
             bg_upsampler=upsampler)
     os.makedirs(args.output, exist_ok=True)

From 3423a33ad585cf805c5afae51a31fb408955abe4 Mon Sep 17 00:00:00 2001
From: Gremlinflat
Date: Wed, 8 Mar 2023 15:00:04 +0700
Subject: [PATCH 2/3] fix: make tensor contiguous before device transfer ('.contiguous()')

---
 realesrgan/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/realesrgan/utils.py b/realesrgan/utils.py
index 67e5232d6..a0c12ead6 100644
--- a/realesrgan/utils.py
+++ b/realesrgan/utils.py
@@ -89,7 +89,7 @@ def pre_process(self, img):
         """Pre-process, such as pre-pad and mod pad, so that the images can be divisible
         """
         img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
-        self.img = img.unsqueeze(0).to(self.device)
+        self.img = img.unsqueeze(0).contiguous().to(self.device)
         if self.half:
             self.img = self.img.half()

From 6b5815c446bc6fa91cbdbb0529d9eae04c754593 Mon Sep 17 00:00:00 2001
From: Gremlinflat
Date: Fri, 17 Mar 2023 21:03:10 +0700
Subject: [PATCH 3/3] fix: auto backend selection logic

---
 inference_realesrgan.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/inference_realesrgan.py b/inference_realesrgan.py
index bfc52692c..9e6aadffe 100644
--- a/inference_realesrgan.py
+++ b/inference_realesrgan.py
@@ -109,7 +109,12 @@
 
     # determine backend type (cpu, cuda, mps)
     if args.backend_type == 'auto':
-        backend_type = 'cuda' if cudaIsAvailable() else 'cpu'
+        if cudaIsAvailable():
+            backend_type = 'cuda'
+        elif mpsIsAvailable():
+            backend_type = 'mps'
+        else:
+            backend_type = 'cpu'
     elif args.backend_type == 'cuda' and cudaIsAvailable():
         backend_type = 'cuda'
     elif args.backend_type == 'mps' and mpsIsAvailable():
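
Usage note (not part of the patches above): once PATCH 1 is applied, the backend can be chosen on the command line, for example

    python inference_realesrgan.py -n RealESRGAN_x4plus -i inputs --backend_type mps

The sketch below mirrors the selection order that PATCH 3 settles on (cuda, then mps, then cpu), written as a standalone helper purely for illustration; the name resolve_backend and the direct torch.* calls are assumptions of this sketch, while the patched script inlines the same logic in main().

    import torch  # the MPS check requires PyTorch >= 1.12

    def resolve_backend(requested: str = 'auto') -> str:
        """Map a requested backend onto one that is actually available, falling back to CPU."""
        if requested == 'auto':
            # Same priority as PATCH 3: CUDA first, then Apple MPS, then CPU.
            if torch.cuda.is_available():
                return 'cuda'
            if torch.backends.mps.is_available():
                return 'mps'
            return 'cpu'
        if requested == 'cuda' and torch.cuda.is_available():
            return 'cuda'
        if requested == 'mps' and torch.backends.mps.is_available():
            return 'mps'
        return 'cpu'

    if __name__ == '__main__':
        print(resolve_backend('auto'))  # e.g. 'mps' on Apple Silicon, 'cuda' on an NVIDIA machine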