# app.py (forked from usamaehsan/stable_diffusion_1.5_inpaint)
import base64
from io import BytesIO

import torch
from torch import autocast
from PIL import Image
from diffusers import (
    StableDiffusionInpaintPipeline,
    PNDMScheduler,
    LMSDiscreteScheduler,
    DDIMScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    DPMSolverMultistepScheduler,
)

# Hugging Face model IDs. model_path is kept from the original file but is
# never referenced below; only the inpainting checkpoint is loaded.
model_path = "stabilityai/stable-diffusion-2-1"
inpainting_model_path = "stabilityai/stable-diffusion-2-inpainting"
def make_scheduler(name, config):
    """Map a scheduler name from the request to a diffusers scheduler instance."""
    # Note: this builds all six schedulers and then picks one; that is fine
    # for these lightweight config-only objects.
    return {
        'PNDM': PNDMScheduler.from_config(config),
        'KLMS': LMSDiscreteScheduler.from_config(config),
        'DDIM': DDIMScheduler.from_config(config),
        'K_EULER': EulerDiscreteScheduler.from_config(config),
        'K_EULER_ANCESTRAL': EulerAncestralDiscreteScheduler.from_config(config),
        'DPMSolverMultistep': DPMSolverMultistepScheduler.from_config(config),
    }[name]
def init():
    """Load the inpainting pipeline once at startup; inference() moves it to GPU."""
    global model
    model = StableDiffusionInpaintPipeline.from_pretrained(
        inpainting_model_path,
        torch_dtype=torch.float16,
    )
def inference(model_inputs):
    """Run one inpainting request. init_image and mask arrive as base64 strings."""
    global model
    prompt = model_inputs.get('prompt', None)
    negative_prompt = model_inputs.get('negative_prompt', None)
    height = model_inputs.get('height', 512)
    width = model_inputs.get('width', 512)
    steps = model_inputs.get('steps', 20)
    guidance_scale = model_inputs.get('guidance_scale', 7)
    seed = model_inputs.get('seed', None)
    scheduler = model_inputs.get('scheduler', 'K_EULER_ANCESTRAL')
    mask = model_inputs.get('mask', None)
    init_image = model_inputs.get('init_image', None)

    # Validate the raw inputs before decoding, so a missing field yields a
    # clear error message rather than an AttributeError.
    if not prompt:
        return {'message': 'No prompt was provided'}
    if not mask:
        return {'message': 'No mask was provided'}
    if not init_image:
        return {'message': 'mask was provided without init_image'}

    init_image = Image.open(BytesIO(base64.b64decode(init_image))).convert("RGB")
    # Resize the mask to the init image so the pipeline sees aligned inputs.
    mask = Image.open(BytesIO(base64.b64decode(mask))).convert("RGB").resize(init_image.size)

    extra_kwargs = {
        "mask_image": mask,
        "image": init_image,
        "width": width,
        "height": height,
    }
    # Move the pipeline to the GPU for generation.
    model = model.to("cuda")

    # A seeded generator makes results reproducible; None leaves it random.
    generator = None
    if seed:
        generator = torch.Generator('cuda').manual_seed(seed)

    model.scheduler = make_scheduler(scheduler, model.scheduler.config)

    with autocast('cuda'):
        image = model(
            prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator,
            num_inference_steps=steps,
            **extra_kwargs,
        ).images[0]

    # Return the result as a base64-encoded JPEG.
    buffered = BytesIO()
    image.save(buffered, format='JPEG')
    image_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
    return {'image_base64': image_base64}
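

# ---------------------------------------------------------------------------
# Hypothetical local smoke test, not part of the original handler: a minimal
# sketch assuming a CUDA GPU and network access to download the weights. It
# builds a tiny white init image and mask in memory, base64-encodes them the
# way a client would, and round-trips them through inference().
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def _b64_png(img):
        # Serialize a PIL image to the base64 string format the API expects.
        buf = BytesIO()
        img.save(buf, format='PNG')
        return base64.b64encode(buf.getvalue()).decode('utf-8')

    init()
    result = inference({
        'prompt': 'a photo of a cat',
        'init_image': _b64_png(Image.new('RGB', (512, 512), 'white')),
        'mask': _b64_png(Image.new('RGB', (512, 512), 'white')),
        'steps': 20,
        'seed': 42,
    })
    print(sorted(result.keys()))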