import bpy
import functools
import math
import random
import re
import time
from . import (
    analytics,
    config,
    progress_bar,
    task_queue,
    utils,
)
from .sd_backends import automatic1111_api
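

# pre-generated (identifier, name, description) tuples, used as EnumProperty items
# by the image-dimension operators below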
example_dimensions_tuple_list = utils.generate_example_dimensions_tuple_list()
sdxl_1024_dimensions_tuple_list = utils.generate_sdxl_1024_dimensions_tuple_list()


def enable_air(scene):
    # register the task queue (this also needs to be done post-load,
    # because app timers get stopped when loading a new blender file)
    task_queue.register()

    # clear any possible past errors in the file (this would happen if ai render
    # was enabled in a file that we just opened, and it had been saved with
    # an error from a past render)
    clear_error(scene)


def mute_legacy_compositor_node_group(scene):
    if scene.node_tree and scene.node_tree.nodes:
        legacy_node_group = scene.node_tree.nodes.get('AIR')
        if legacy_node_group:
            legacy_node_group.mute = True


def set_image_dimensions(context, width, height):
    context.scene.render.resolution_x = width
    context.scene.render.resolution_y = height
    context.scene.render.resolution_percentage = 100

    clear_error(context.scene)
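

# Error-handling helpers. Note that handle_error always returns False, so callers
# can report a problem and bail out in one step with `return handle_error(...)`.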


def handle_error(msg, error_key=''):
    """Show an error popup, and set the error message to be displayed in the ui"""
    print("AI Render Error:", msg)
    task_queue.add(functools.partial(bpy.ops.ai_render.show_error_popup, 'INVOKE_DEFAULT', error_message=msg, error_key=error_key))
    analytics.track_event('ai_render_error', value=error_key)
    return False


def set_silent_error(scene, msg, error_key=''):
    """Set the error message to be displayed in the ui, but don't show a popup"""
    print("AI Render Error:", msg)
    scene.air_props.error_message = msg
    scene.air_props.error_key = error_key


def clear_error(scene):
    """Clear the error message in the ui"""
    scene.air_props.error_message = ''
    scene.air_props.error_key = ''


def clear_error_handler(self, context):
    clear_error(context.scene)


def generate_new_random_seed(scene):
    props = scene.air_props
    if props.use_random_seed:
        props.seed = random.randint(1000000000, 2147483647)
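

# Animated prompt helpers. The prompts live in a text data-block in which each
# line has the form "<start_frame>: <prompt>", with an optional "Negative:" line
# separating the positive prompts from the negative ones.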


def ensure_animated_prompts_text():
    text = utils.get_animated_prompt_text_data_block()

    if text:
        text.select_set(0, 0, -1, -1)
    else:
        text = bpy.data.texts.new(config.animated_prompts_text_name)
        text.write("1: Stable Diffusion Prompt starting at frame 1\n")
        text.write("30: Stable Diffusion Prompt starting at frame 30\n")
        text.write("# etc...\n")
        text.write("\n")
        text.write("# You can also include negative prompts\n")
        text.write(f"# See more info at {config.HELP_WITH_NEGATIVE_PROMPTS_URL}\n")
        text.write("Negative:\n")
        text.write("1: ugly, bad art, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, tiling, signature, cut off, draft\n")
        text.select_set(0, 3, 0, -1)


def ensure_animated_prompts_text_editor(context):
    script_area = None

    areas = utils.get_areas_by_type('TEXT_EDITOR', context=context)
    if len(areas) > 0:
        script_area = areas[0]

    if not script_area:
        # get current area
        area = context.area
        if area is None:
            area = context.screen.areas[0]

        utils.split_area(context, area, factor=0.5)

        # create a new text editor area
        areas = utils.get_areas_by_type(area.type, context=context)
        if len(areas) > 0:
            script_area = areas[-1]
            script_area.type = 'TEXT_EDITOR'

    script_area.spaces[0].text = utils.get_animated_prompt_text_data_block()


def render_frame(context, current_frame, prompts):
    """Render the current frame as part of an animation"""
    # set the frame
    context.scene.frame_set(current_frame)

    # render the frame
    bpy.ops.render.render()

    # post to the api
    return sd_generate(context.scene, prompts)
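

# Helpers for saving rendered/generated images to disk and loading results back
# into Blender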


def save_render_to_file(scene, filename_prefix):
    try:
        temp_file = utils.create_temp_file(filename_prefix + "-", suffix=f".{utils.get_image_format()}")
    except:
        return handle_error("Couldn't create temp file for image", "temp_file")

    try:
        orig_render_file_format = scene.render.image_settings.file_format
        orig_render_color_mode = scene.render.image_settings.color_mode
        orig_render_color_depth = scene.render.image_settings.color_depth

        scene.render.image_settings.file_format = utils.get_image_format(to_lower=False)
        scene.render.image_settings.color_mode = 'RGBA'
        scene.render.image_settings.color_depth = '8'

        bpy.data.images['Render Result'].save_render(temp_file)

        scene.render.image_settings.file_format = orig_render_file_format
        scene.render.image_settings.color_mode = orig_render_color_mode
        scene.render.image_settings.color_depth = orig_render_color_depth
    except:
        return handle_error("Couldn't save rendered image", "save_render")

    return temp_file


def save_before_image(scene, filename_prefix):
    ext = utils.get_extension_from_file_format(scene.render.image_settings.file_format)
    if ext:
        ext = f".{ext}"
    filename = f"{filename_prefix}{ext}"
    full_path_and_filename = utils.get_absolute_path_for_output_file(scene.air_props.autosave_image_path, filename)
    try:
        bpy.data.images['Render Result'].save_render(bpy.path.abspath(full_path_and_filename))
    except:
        return handle_error(f"Couldn't save 'before' image to {bpy.path.abspath(full_path_and_filename)}", "save_image")


def save_after_image(scene, filename_prefix, img_file):
    filename = f"{filename_prefix}.{utils.get_image_format()}"
    full_path_and_filename = utils.get_absolute_path_for_output_file(scene.air_props.autosave_image_path, filename)
    try:
        utils.copy_file(img_file, full_path_and_filename)
        return full_path_and_filename
    except:
        return handle_error(f"Couldn't save 'after' image to {bpy.path.abspath(full_path_and_filename)}", "save_image")


def save_animation_image(scene, filename_prefix, img_file):
    filename = f"{filename_prefix}{str(scene.frame_current).zfill(4)}.{utils.get_image_format()}"
    full_path_and_filename = utils.get_absolute_path_for_output_file(scene.air_props.animation_output_path, filename)
    try:
        utils.copy_file(img_file, full_path_and_filename)
        return full_path_and_filename
    except:
        return handle_error(f"Couldn't save animation image to {bpy.path.abspath(full_path_and_filename)}", "save_image")
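

# note: load_image reuses an existing image data-block with the same name (just
# updating its filepath) rather than creating a duplicate data-block on each load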


def load_image(filename, data_block_name=None):
    name = filename
    if data_block_name:
        name = data_block_name

    if name in bpy.data.images:
        existing_img = bpy.data.images[name]
        existing_img.filepath = filename
        return existing_img

    img_file = bpy.data.images.load(filename, check_existing=False)
    img_file.name = name
    return img_file


def do_pre_render_setup(scene):
    # Lock the user interface when rendering, so that we can change
    # compositor nodes in the render_init handler without causing a crash!
    # See: https://docs.blender.org/api/current/bpy.app.handlers.html#note-on-altering-data
    scene.render.use_lock_interface = True

    # clear any previous errors
    clear_error(scene)

    # mute the legacy compositor node group, if it exists
    mute_legacy_compositor_node_group(scene)


def do_pre_api_setup(scene):
    # TODO: does nothing at the moment
    pass


def validate_params(scene, prompt=None):
    if utils.get_dream_studio_api_key().strip() == "" and utils.sd_backend() == "dreamstudio":
        return handle_error("You must enter an API Key to render with DreamStudio", "api_key")

    if not utils.are_dimensions_valid(scene):
        return handle_error("Please set width and height to valid values", "invalid_dimensions")

    if utils.are_dimensions_too_small(scene):
        return handle_error("Image dimensions are too small. Please increase width and/or height", "dimensions_too_small")

    if utils.are_dimensions_too_large(scene):
        return handle_error("Image dimensions are too large. Please decrease width and/or height", "dimensions_too_large")

    if prompt == "":
        return handle_error("Please enter a prompt for Stable Diffusion", "prompt")

    return True


def validate_animation_output_path(scene):
    props = scene.air_props
    if not utils.does_path_exist(props.animation_output_path):
        return handle_error("Animation output path does not exist", "animation_output_path")
    else:
        return True


def get_full_prompt(scene, prompt=None):
    props = scene.air_props
    if prompt is None:
        prompt = props.prompt_text.strip()
    if prompt == config.default_prompt_text:
        prompt = ""
    if props.use_preset:
        prompt = props.preset_style.replace("{prompt}", prompt)
    return prompt
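

# e.g. with a preset style like "oil painting of {prompt}" and the prompt "a castle",
# get_full_prompt returns "oil painting of a castle"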


def get_prompt_at_frame(animated_prompts, frame):
    for line in reversed(animated_prompts):
        if line['start_frame'] <= frame:
            return line['prompt']
    return ""
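

# e.g. with prompts starting at frames 1 and 30, get_prompt_at_frame returns the
# frame-1 prompt for frames 1-29 and the frame-30 prompt from frame 30 onward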


def validate_and_process_animated_prompt_text(scene):
    text_data = utils.get_animated_prompt_text_data_block()
    if text_data is None:
        # callers unpack two values, so return a 2-tuple even on error
        handle_error("Animated prompt text does not exist. Please edit animated prompts.", "animated_prompt_text_data_block")
        return False, False
    lines = text_data.as_string().splitlines()
    lines = [line.strip() for line in lines]

    # find "Negative:" in lines, if it exists
    negative_index = -1
    for i, line in enumerate(lines):
        if line.lower() == "negative:":
            negative_index = i
            break

    if negative_index > -1:
        positive_lines = lines[:negative_index]
        negative_lines = lines[negative_index+1:]
    else:
        positive_lines = lines
        negative_lines = []

    def parse_lines(lines, is_positive=True):
        r = re.compile(r'^(\d+):(.*)')
        lines = list(filter(r.match, lines))

        processed_lines = []
        for line in lines:
            m = r.match(line)
            if m:
                start_frame = int(m.group(1))
                prompt = m.group(2).strip()
                processed_lines.append({
                    'start_frame': start_frame,
                    'prompt': get_full_prompt(scene, prompt=prompt) if is_positive else prompt,
                })

        if is_positive:
            processed_lines = list(filter(lambda x: x['prompt'] != "", processed_lines))

        if len(processed_lines) == 0 and is_positive:
            return handle_error(f"Animated Prompt text is empty or invalid. [Get help with animated prompts]({config.HELP_WITH_ANIMATED_PROMPTS_URL})", "animated_prompt_text")

        if len(processed_lines) > 0:
            processed_lines.sort(key=lambda x: x['start_frame'])
            processed_lines[0]['start_frame'] = 1  # ensure the first frame is 1

        return processed_lines

    positive_lines = parse_lines(positive_lines)
    negative_lines = parse_lines(negative_lines, is_positive=False)

    return positive_lines, negative_lines
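

# e.g. this animated prompt text:
#     1: a castle
#     30: a forest
#     Negative:
#     1: blurry
# parses to positive prompts [{'start_frame': 1, 'prompt': 'a castle'}, {'start_frame': 30, 'prompt': 'a forest'}]
# (each run through get_full_prompt) and negative prompts [{'start_frame': 1, 'prompt': 'blurry'}]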


def validate_and_process_animated_prompt_text_for_single_frame(scene, frame):
    positive_lines, negative_lines = validate_and_process_animated_prompt_text(scene)
    if not positive_lines:
        return None, None
    else:
        return get_prompt_at_frame(positive_lines, frame), get_prompt_at_frame(negative_lines, frame)


def sd_generate(scene, prompts=None, use_last_sd_image=False):
    """Post to the API to generate a Stable Diffusion image and then process it"""
    props = scene.air_props

    # get the prompt if we haven't been given one
    if not prompts:
        if props.use_animated_prompts:
            prompt, negative_prompt = validate_and_process_animated_prompt_text_for_single_frame(scene, scene.frame_current)
            if not prompt:
                return False
        else:
            prompt = get_full_prompt(scene)
            negative_prompt = props.negative_prompt_text.strip()
    else:
        prompt = prompts["prompt"]
        negative_prompt = prompts["negative_prompt"]

    # validate the parameters we will send
    if not validate_params(scene, prompt):
        return False

    # generate a new seed, if we want a random one
    generate_new_random_seed(scene)

    # prepare the output filenames
    before_output_filename_prefix = utils.get_image_filename(scene, prompt, negative_prompt, "-1-before")
    after_output_filename_prefix = utils.get_image_filename(scene, prompt, negative_prompt, "-2-after")
    animation_output_filename_prefix = "ai-render-"

    # if we want to use the last SD image, try loading it now
    if use_last_sd_image:
        if not props.last_generated_image_filename:
            return handle_error("Couldn't find the last Stable Diffusion image", "last_generated_image_filename")
        try:
            img_file = open(props.last_generated_image_filename, 'rb')
        except:
            return handle_error("Couldn't load the last Stable Diffusion image. It's probably been deleted or moved. You'll need to restore it or render a new image.", "load_last_generated_image")
    else:
        # else, use the rendered image...

        # save the rendered image and then read it back in
        temp_input_file = save_render_to_file(scene, before_output_filename_prefix)
        if not temp_input_file:
            return False
        img_file = open(temp_input_file, 'rb')

        # autosave the before image, if we want that, and we're not rendering an animation
        if (
            props.do_autosave_before_images
            and props.autosave_image_path
            and not props.is_rendering_animation
            and not props.is_rendering_animation_manually
        ):
            save_before_image(scene, before_output_filename_prefix)

    # prepare data for the API request
    params = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "width": utils.get_output_width(scene),
        "height": utils.get_output_height(scene),
        "image_similarity": props.image_similarity,
        "seed": props.seed,
        "cfg_scale": props.cfg_scale,
        "steps": props.steps,
        "sampler": props.sampler,
    }

    # get the backend we're using
    sd_backend = utils.get_active_backend()

    # send to whichever API we're using
    start_time = time.time()
    generated_image_file = sd_backend.generate(params, img_file, after_output_filename_prefix, props)

    # if we didn't get a successful image, stop here (an error will have been handled by the api function)
    if not generated_image_file:
        return False

    # autosave the after image, if we should
    if utils.should_autosave_after_image(props):
        generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # store this image filename as the last generated image
    props.last_generated_image_filename = generated_image_file

    # if we want to automatically upscale (and the backend supports it), do it now
    if props.do_upscale_automatically and sd_backend.supports_upscaling() and sd_backend.is_upscaler_model_list_loaded():
        after_output_filename_prefix = after_output_filename_prefix + "-upscaled"

        opened_image_file = open(generated_image_file, 'rb')
        generated_image_file = sd_backend.upscale(opened_image_file, after_output_filename_prefix, props)

        # if the upscale failed, stop here (an error will have been handled by the api function)
        if not generated_image_file:
            return False

        # autosave the upscaled after image, if we should
        if utils.should_autosave_after_image(props):
            generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)
            if not generated_image_file:
                return False

    # if we're rendering an animation manually, save the image to the animation output path
    if props.is_rendering_animation_manually:
        generated_image_file = save_animation_image(scene, animation_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # load the image into our scene
    try:
        img = load_image(generated_image_file, after_output_filename_prefix)
    except:
        return handle_error("Couldn't load the image from Stable Diffusion", "load_sd_image")

    try:
        # View the image in the Render Result view
        utils.view_sd_in_render_view(img, scene)
    except:
        return handle_error("Couldn't switch the view to the image from Stable Diffusion", "view_sd_image")

    # track an analytics event
    additional_params = {
        "backend": utils.sd_backend(),
        "model": props.sd_model if sd_backend.supports_choosing_model() else "none",
        "preset_style": props.preset_style if props.use_preset else "none",
        "is_animation_frame": "yes" if prompts else "no",
        "has_animated_prompt": "yes" if props.use_animated_prompts else "no",
        "upscale_enabled": "yes" if props.do_upscale_automatically else "no",
        "upscale_factor": props.upscale_factor,
        "upscaler_model": props.upscaler_model,
        "duration": round(time.time() - start_time),
    }

    if props.controlnet_is_enabled and utils.sd_backend() == "automatic1111":
        additional_params["controlnet_enabled"] = "yes"
        additional_params["controlnet_model"] = props.controlnet_model
        additional_params["controlnet_module"] = props.controlnet_module
    else:
        additional_params["controlnet_enabled"] = "no"
        additional_params["controlnet_model"] = "none"
        additional_params["controlnet_module"] = "none"

    event_params = analytics.prepare_event('generate_image', generation_params=params, additional_params=additional_params)
    analytics.track_event('generate_image', event_params=event_params)

    # return success
    return True
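

# sd_upscale, sd_inpaint and sd_outpaint below follow the same overall flow as
# sd_generate: validate, send to the active backend, autosave if requested, then
# load the result back into the scene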


def sd_upscale(scene):
    """Post to the API to upscale the most recent Stable Diffusion image and then process it"""
    props = scene.air_props

    # try loading the last SD image
    if not props.last_generated_image_filename:
        return handle_error("Couldn't find the last Stable Diffusion image", "last_generated_image_filename")
    try:
        img_file = open(props.last_generated_image_filename, 'rb')
    except:
        return handle_error("Couldn't load the last Stable Diffusion image. It's probably been deleted or moved. You'll need to restore it or render a new image.", "load_last_generated_image")

    # create a filename for the after image, based on the before image
    # get the filename from the full path and filename
    after_output_filename_prefix = utils.get_filename_from_path(props.last_generated_image_filename, False) + "-upscaled"

    # get the backend we're using
    sd_backend = utils.get_active_backend()

    # send to whichever API we're using
    start_time = time.time()
    generated_image_file = sd_backend.upscale(img_file, after_output_filename_prefix, props)

    # if we didn't get a successful image, stop here (an error will have been handled by the api function)
    if not generated_image_file:
        return False

    # autosave the image, if we should
    if utils.should_autosave_after_image(props):
        generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # load the image into our scene
    try:
        img = load_image(generated_image_file, after_output_filename_prefix)
    except:
        return handle_error("Couldn't load the image from Stable Diffusion", "load_sd_image")

    try:
        # View the image in the Render Result view
        utils.view_sd_in_render_view(img, scene)
    except:
        return handle_error("Couldn't switch the view to the image from Stable Diffusion", "view_sd_image")

    # track an analytics event
    additional_params = {
        "backend": utils.sd_backend(),
        "upscale_factor": props.upscale_factor,
        "upscaler_model": props.upscaler_model,
        "duration": round(time.time() - start_time),
    }
    event_params = analytics.prepare_event('upscale_image', additional_params=additional_params)
    analytics.track_event('upscale_image', event_params=event_params)

    # return success
    return True


# Inpainting
def sd_inpaint(scene):
    """Post to the API to generate a Stable Diffusion image with inpainting, and then process it"""
    props = scene.air_props

    # get the prompt if we haven't been given one
    if props.use_animated_prompts:
        prompt, negative_prompt = validate_and_process_animated_prompt_text_for_single_frame(scene, scene.frame_current)
        if not prompt:
            return False
    else:
        prompt = get_full_prompt(scene)
        negative_prompt = props.negative_prompt_text.strip()

    # validate the parameters we will send
    if not validate_params(scene, prompt):
        return False

    # generate a new seed, if we want a random one
    generate_new_random_seed(scene)

    # prepare the output filenames
    before_output_filename_prefix = utils.get_image_filename(scene, prompt, negative_prompt, "-1-before")
    after_output_filename_prefix = utils.get_image_filename(scene, prompt, negative_prompt, "-2-inpainted")
    animation_output_filename_prefix = "ai-render-"

    # if we want to use the last SD image, try loading it now
    if not props.last_generated_image_filename:
        return handle_error("Couldn't find the last Stable Diffusion image", "last_generated_image_filename")
    try:
        img_file = open(props.last_generated_image_filename, 'rb')
    except:
        return handle_error("Couldn't load the last Stable Diffusion image. It's probably been deleted or moved. You'll need to restore it or render a new image.", "load_last_generated_image")

    # load mask here
    if props.inpaint_mask_path == "":
        return handle_error("Couldn't find the Inpaint Mask File", "inpaint_mask_path")
    try:
        mask_file = open(props.inpaint_mask_path, 'rb')
    except:
        return handle_error("Couldn't load the uploaded inpaint mask file", "inpaint_mask_path")

    # prepare data for the API request
    params = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "width": utils.get_output_width(scene),
        "height": utils.get_output_height(scene),
        "seed": props.seed,
        "cfg_scale": props.cfg_scale,
        "steps": props.steps,
        "is_full_res": props.inpaint_full_res,
        "full_res_padding": props.inpaint_padding,
    }

    # get the backend we're using
    sd_backend = utils.get_active_backend()

    # send to whichever API we're using
    start_time = time.time()
    generated_image_file = sd_backend.inpaint(params, img_file, mask_file, after_output_filename_prefix, props)

    # if we didn't get a successful image, stop here (an error will have been handled by the api function)
    if not generated_image_file:
        return False

    # autosave the after image, if we should
    if utils.should_autosave_after_image(props):
        generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # store this image filename as the last generated image
    props.last_generated_image_filename = generated_image_file

    # if we're rendering an animation manually, save the image to the animation output path
    if props.is_rendering_animation_manually:
        generated_image_file = save_animation_image(scene, animation_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # load the image into our scene
    try:
        img = load_image(generated_image_file, after_output_filename_prefix)
    except:
        return handle_error("Couldn't load the image from Stable Diffusion", "load_sd_image")

    try:
        # View the image in the Render Result view
        utils.view_sd_in_render_view(img, scene)
    except:
        return handle_error("Couldn't switch the view to the image from Stable Diffusion", "view_sd_image")

    # return success
    return True


# Outpainting
def sd_outpaint(scene):
    """Post to the API to generate a Stable Diffusion image with outpainting, and then process it"""
    props = scene.air_props

    # get the prompt if we haven't been given one
    if props.use_animated_prompts:
        prompt, negative_prompt = validate_and_process_animated_prompt_text_for_single_frame(scene, scene.frame_current)
        if not prompt:
            return False
    else:
        prompt = get_full_prompt(scene)
        negative_prompt = props.negative_prompt_text.strip()

    # validate the parameters we will send
    if not validate_params(scene, prompt):
        return False

    # generate a new seed, if we want a random one
    generate_new_random_seed(scene)

    # prepare the output filenames
    before_output_filename_prefix = utils.get_image_filename(scene, prompt, negative_prompt, "-1-before")
    after_output_filename_prefix = utils.get_image_filename(scene, prompt, negative_prompt, "-2-outpainted")
    animation_output_filename_prefix = "ai-render-"

    # if we want to use the last SD image, try loading it now
    if not props.last_generated_image_filename:
        return handle_error("Couldn't find the last Stable Diffusion image", "last_generated_image_filename")
    try:
        img_file = open(props.last_generated_image_filename, 'rb')
    except:
        return handle_error("Couldn't load the last Stable Diffusion image. It's probably been deleted or moved. You'll need to restore it or render a new image.", "load_last_generated_image")

    # prepare data for the API request
    params = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "width": utils.get_output_width(scene),
        "height": utils.get_output_height(scene),
        "seed": props.seed,
        "cfg_scale": props.cfg_scale,
        "steps": props.steps,
        "pixels": props.outpaint_pixels_to_expand,
        "mask_blur": props.outpaint_mask_blur,
        "directions": [props.outpaint_direction],
        "noise_q": props.outpaint_noise_q,
        "color_variation": props.outpaint_color_variation,
    }

    # get the backend we're using
    sd_backend = utils.get_active_backend()

    # send to whichever API we're using
    start_time = time.time()
    generated_image_file = sd_backend.outpaint(params, img_file, after_output_filename_prefix, props)

    # if we didn't get a successful image, stop here (an error will have been handled by the api function)
    if not generated_image_file:
        return False

    # autosave the after image, if we should
    if utils.should_autosave_after_image(props):
        generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # store this image filename as the last generated image
    props.last_generated_image_filename = generated_image_file

    # if we're rendering an animation manually, save the image to the animation output path
    if props.is_rendering_animation_manually:
        generated_image_file = save_animation_image(scene, animation_output_filename_prefix, generated_image_file)
        if not generated_image_file:
            return False

    # load the image into our scene
    try:
        img = load_image(generated_image_file, after_output_filename_prefix)
    except:
        return handle_error("Couldn't load the image from Stable Diffusion", "load_sd_image")

    try:
        # View the image in the Render Result view
        utils.view_sd_in_render_view(img, scene)
    except:
        return handle_error("Couldn't switch the view to the image from Stable Diffusion", "view_sd_image")

    # return success
    return True
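

# Operators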


class AIR_OT_enable(bpy.types.Operator):
    "Enable AI Render in this scene"
    bl_idname = "ai_render.enable"
    bl_label = "Enable AI Render"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        enable_air(context.scene)
        context.scene.air_props.is_enabled = True
        return {'FINISHED'}


class AIR_OT_disable(bpy.types.Operator):
    "Disable AI Render in this scene"
    bl_idname = "ai_render.disable"
    bl_label = "Disable AI Render"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        context.scene.air_props.is_enabled = False
        return {'FINISHED'}


class AIR_OT_set_image_size_to_1024x1024(bpy.types.Operator):
    "Set render width and height to 1024 x 1024"
    bl_idname = "ai_render.set_image_size_to_1024x1024"
    bl_label = "1024 x 1024"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        set_image_dimensions(context, 1024, 1024)
        return {'FINISHED'}


class AIR_OT_show_other_dimension_options(bpy.types.Operator):
    "Other options for image size"
    bl_idname = "ai_render.show_other_dimension_options"
    bl_label = "Image Size Options"
    bl_options = {'REGISTER', 'UNDO'}

    panel_width = 250

    width: bpy.props.EnumProperty(
        name="Image Width",
        default="1024",
        items=example_dimensions_tuple_list,
        description="Image Width"
    )
    height: bpy.props.EnumProperty(
        name="Image Height",
        default="1024",
        items=example_dimensions_tuple_list,
        description="Image Height"
    )

    def draw(self, context):
        layout = self.layout
        utils.label_multiline(layout, text=f"Choose dimensions that Stable Diffusion can work with. (If you're unsure, try 1024x1024). Dimensions can be any multiple of {utils.valid_dimension_step_size} in the range {utils.min_dimension_size}-{utils.max_dimension_size}.", width=self.panel_width)

        layout.separator()

        row = layout.row()
        row.label(text="Common Dimensions:")

        row = layout.row()
        col = row.column()
        col.label(text="Width:")
        col = row.column()
        col.prop(self, "width", text="")

        row = layout.row()
        col = row.column()
        col.label(text="Height:")
        col = row.column()
        col.prop(self, "height", text="")

        layout.separator()

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self, width=self.panel_width)

    def execute(self, context):
        set_image_dimensions(context, int(self.width), int(self.height))
        return {'FINISHED'}


class AIR_OT_show_dimension_options_for_sdxl_1024(bpy.types.Operator):
    "Options for image size with SDXL 1024"
    bl_idname = "ai_render.show_dimension_options_for_sdxl_1024"
    bl_label = "Image Size Options"
    bl_options = {'REGISTER', 'UNDO'}

    panel_width = 250

    dimensions: bpy.props.EnumProperty(
        name="Image Dimensions",
        default="1024x1024",
        items=sdxl_1024_dimensions_tuple_list,
        description="Image Dimensions"
    )

    def draw(self, context):
        layout = self.layout
        utils.label_multiline(layout, text="Choose dimensions that Stable Diffusion can work with. SDXL images must be one of these image sizes:", width=self.panel_width)

        layout.separator()

        row = layout.row()
        col = row.column()
        col.label(text="Dimensions:")
        col = row.column()
        col.prop(self, "dimensions", text="")

        layout.separator()

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self, width=self.panel_width)

    def execute(self, context):
        width, height = self.dimensions.split('x')
        set_image_dimensions(context, int(width), int(height))
        return {'FINISHED'}


class AIR_OT_copy_preset_text(bpy.types.Operator):
    "Copy preset text to clipboard"
    bl_idname = "ai_render.copy_preset_text"
    bl_label = "Copy Preset Text"

    def execute(self, context):
        context.window_manager.clipboard = context.scene.air_props.preset_style
        self.report({'INFO'}, "Preset text copied to clipboard")
        return {'FINISHED'}


class AIR_OT_edit_animated_prompts(bpy.types.Operator):
    "Show the animated prompts panel, and focus it"
    bl_idname = "ai_render.edit_animated_prompts"
    bl_label = "Edit Animated Prompts"

    def execute(self, context):
        ensure_animated_prompts_text()
        task_queue.add(functools.partial(ensure_animated_prompts_text_editor, context))
        return {'FINISHED'}


class AIR_OT_generate_new_image_from_render(bpy.types.Operator):
    "Generate a new Stable Diffusion image - without re-rendering - from the last rendered image"
    bl_idname = "ai_render.generate_new_image_from_render"
    bl_label = "New Image From Last Render"

    def execute(self, context):
        do_pre_render_setup(context.scene)
        do_pre_api_setup(context.scene)

        # post to the api (on a different thread, outside the operator)
        task_queue.add(functools.partial(sd_generate, context.scene))

        return {'FINISHED'}


class AIR_OT_generate_new_image_from_last_sd_image(bpy.types.Operator):
    "Generate a new Stable Diffusion image - without re-rendering - using the most recent Stable Diffusion image as the starting point"
    bl_idname = "ai_render.generate_new_image_from_current"
    bl_label = "New Image From Last AI Image"

    def execute(self, context):
        do_pre_render_setup(context.scene)
        do_pre_api_setup(context.scene)

        # post to the api (on a different thread, outside the operator)
        task_queue.add(functools.partial(sd_generate, context.scene, None, True))

        return {'FINISHED'}


class AIR_OT_upscale_last_sd_image(bpy.types.Operator):
    "Upscale the most recent Stable Diffusion image"
    bl_idname = "ai_render.upscale_last_sd_image"
    bl_label = "Upscale Last AI Image"

    def execute(self, context):
        do_pre_render_setup(context.scene)
        do_pre_api_setup(context.scene)

        # post to the api (on a different thread, outside the operator)
        task_queue.add(functools.partial(sd_upscale, context.scene))

        return {'FINISHED'}
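

# note: animation rendering runs as a modal operator driven by a window-manager
# timer, rendering one frame per step so the UI can update between frames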


class AIR_OT_render_animation(bpy.types.Operator):
    "Render an animation using Stable Diffusion"
    bl_idname = "ai_render.render_animation"
    bl_label = "Render Animation"

    _timer = None
    _ticks_since_last_render = 0
    _finished = True
    _start_frame = 0
    _end_frame = 0
    _frame_step = 1
    _current_frame = 0
    _orig_current_frame = 0
    _animated_prompts = None
    _animated_negative_prompts = None
    _static_prompt = None
    _negative_static_prompt = None

    def _pre_render(self, context):
        scene = context.scene

        # do validation and setup
        if validate_params(scene) and validate_animation_output_path(scene):
            do_pre_render_setup(scene)
            do_pre_api_setup(scene)
        else:
            return False

        # validate and process the animated prompts, if we are using them
        if context.scene.air_props.use_animated_prompts:
            self._animated_prompts, self._animated_negative_prompts = validate_and_process_animated_prompt_text(context.scene)
            if not self._animated_prompts:
                return False
        else:
            self._animated_prompts = None
            self._static_prompt = get_full_prompt(context.scene)
            self._negative_static_prompt = scene.air_props.negative_prompt_text.strip()

        return True

    def _start_render(self, context):
        self._finished = False
        self._orig_current_frame = context.scene.frame_current
        self._start_frame = context.scene.frame_start
        self._end_frame = context.scene.frame_end
        self._frame_step = context.scene.frame_step
        self._current_frame = context.scene.frame_start

        context.scene.air_props.is_rendering_animation_manually = True
        context.scene.air_progress_status_message = ""
        context.scene.air_progress_label = self._get_label()
        context.scene.air_progress = 0

        self._ticks_since_last_render = 0
        self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
        context.window_manager.modal_handler_add(self)

    def _end_render(self, context, status_message):
        self._finished = True
        context.scene.frame_current = self._orig_current_frame
        context.scene.air_props.is_rendering_animation_manually = False
        context.scene.air_progress_status_message = status_message
        progress_bar.hide_progress_bar_after_delay()
        context.window_manager.event_timer_remove(self._timer)

    def _advance_frame(self, context):
        self._current_frame += self._frame_step
        if self._current_frame > context.scene.frame_end: