Can someone please help me resolve or at least understand where and why this error occurs?
Apparently both "Float and "Half" are used but I can't understand why the problem occurs, since I configured everything exactly as in the instructions.
Right now I'm on Forge and I'm using ControlNet v1.1.455 (the official one, not the Forge Built-in one).
The preprocessors and model work as they give me the preview of the unit.
The problem only occurs when I try to use "instant_id_face_embedding" with "ip-adapter_instant_id_sdxl [eb2d3ec0]".
Both Forge and ControlNet are up to date.
I have already checked, and there are no conflicts with other extensions.
Can anyone help me understand? Thanks in advance.
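In case it helps narrow things down: from the traceback below, the ControlNet's time_embed layers appear to be in half precision while the timestep embedding tensor reaching them is float32. A minimal sketch of the same kind of failure in plain PyTorch (my own repro, not Forge code; the layer sizes are made up and the exact error text may vary by PyTorch version and device):

```python
import torch
import torch.nn as nn

# Half-precision linear layer, standing in for the ControlNet time_embed block.
layer = nn.Linear(320, 1280).half()

# Float32 input, standing in for the timestep embedding t_emb.
t_emb = torch.randn(1, 320)

try:
    layer(t_emb)  # dtype mismatch between input (Float) and weight (Half)
except RuntimeError as e:
    print(e)  # e.g. "mat1 and mat2 must have the same dtype, but got Float and Half"

# Casting the input to the weight's dtype avoids the error:
out = layer(t_emb.to(layer.weight.dtype))
print(out.dtype)  # torch.float16
```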
This is the Error Log.
Traceback (most recent call last):
File "F:\Forge\modules_forge\main_thread.py", line 37, in loop
task.work()
File "F:\Forge\modules_forge\main_thread.py", line 26, in work
self.result = self.func(*self.args, **self.kwargs)
File "F:\Forge\modules\txt2img.py", line 111, in txt2img_function
processed = processing.process_images(p)
File "F:\Forge\modules\processing.py", line 752, in process_images
res = process_images_inner(p)
File "F:\Forge\extensions\sd-webui-controlnet-main\scripts\batch_hijack.py", line 59, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "F:\Forge\modules\processing.py", line 922, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "F:\Forge\extensions\sd-webui-controlnet-main\scripts\hook.py", line 470, in process_sample
return process.sample_before_CN_hack(*args, **kwargs)
File "F:\Forge\modules\processing.py", line 1275, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "F:\Forge\modules\sd_samplers_kdiffusion.py", line 251, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "F:\Forge\modules\sd_samplers_common.py", line 263, in launch_sampling
return func()
File "F:\Forge\modules\sd_samplers_kdiffusion.py", line 251, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "F:\Forge\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\Forge\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\Forge\modules\sd_samplers_cfg_denoiser.py", line 182, in forward
denoised = forge_sampler.forge_sample(self, denoiser_params=denoiser_params,
File "F:\Forge\modules_forge\forge_sampler.py", line 88, in forge_sample
denoised = sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options, seed)
File "F:\Forge\ldm_patched\modules\samplers.py", line 289, in sampling_function
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options)
File "F:\Forge\ldm_patched\modules\samplers.py", line 258, in calc_cond_uncond_batch
output = model.apply_model(input_x, timestep, **c).chunk(batch_chunks)
File "F:\Forge\ldm_patched\modules\model_base.py", line 90, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\Forge\extensions\sd-webui-controlnet-main\scripts\hook.py", line 905, in forward_webui
raise e
File "F:\Forge\extensions\sd-webui-controlnet-main\scripts\hook.py", line 902, in forward_webui
return forward(*args, **kwargs)
File "F:\Forge\extensions\sd-webui-controlnet-main\scripts\hook.py", line 800, in forward
emb = self.time_embed(t_emb)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\container.py", line 215, in forward
input = module(input)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\Forge\ldm_patched\modules\ops.py", line 98, in forward
return super().forward(*args, **kwargs)
File "F:\Forge\venv\lib\site-packages\torch\nn\modules\linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 must have the same dtype, but got Float and Half
mat1 and mat2 must have the same dtype, but got Float and Half
*** Error completing request
*** Arguments: ('task(b2b0ehyxjn8aeke)', <gradio.routes.Request object at 0x000001F6F10C5AB0>, 'score_9, score_8_up, score_7_up, score_6_up, score_5_up, score_4_up,\nredhead girl, slim body, long hair, portrait, looking at viewer,', '', [], 20, 'Euler a', 1, 1, 4, 832, 1024, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, {'ad_model': 'face_yolov8n.pt', 'ad_model_classes': '', 'ad_tab_enable': True, 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.75, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.35, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': True, 'ad_inpaint_width': 1024, 'ad_inpaint_height': 1024, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_scheduler': 'Use same scheduler', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=True, module='instant_id_face_embedding', model='ip-adapter_instant_id_sdxl [eb2d3ec0]', weight=1.0, image={'image': array([[[ 49, 52, 59],
*** [ 49, 52, 59],
*** [ 49, 52, 59],
*** ...,
*** [234, 237, 244],
*** [238, 239, 244],
*** [238, 239, 244]],
*** [[ 49, 52, 59],
*** [ 49, 52, 59],
*** [ 49, 52, 59],
*** ...,
*** [234, 237, 242],
*** [238, 239, 244],
*** [238, 239, 244]],
*** [[ 49, 52, 59],
*** [ 49, 52, 59],
*** [ 49, 52, 59],
*** ...,
*** [234, 237, 242],
*** [238, 239, 244],
*** [238, 239, 243]],
*** ...,
*** [[ 77, 45, 34],
*** [ 67, 37, 27],
*** [ 57, 30, 21],
*** ...,
*** [ 33, 32, 37],
*** [ 32, 33, 37],
*** [ 32, 33, 37]],
*** [[ 73, 43, 33],
*** [ 65, 37, 26],
*** [ 57, 30, 21],
*** ...,
*** [ 33, 32, 37],
*** [ 32, 33, 37],
*** [ 32, 33, 37]],
*** [[ 71, 41, 31],
*** [ 64, 36, 25],
*** [ 56, 29, 22],
*** ...,
*** [ 33, 32, 37],
*** [ 32, 33, 37],
*** [ 32, 33, 37]]], dtype=uint8), 'mask': array([[[0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0],
*** ...,
*** [0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0]],
*** [[0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0],
*** ...,
*** [0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0]],
*** [[0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0],
*** ...,
*** [0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0]],
*** ...,
*** [[0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0],
*** ...,
*** [0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0]],
*** [[0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0],
*** ...,
*** [0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0]],
*** [[0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0],
*** ...,
*** [0, 0, 0],
*** [0, 0, 0],
*** [0, 0, 0]]], dtype=uint8)}, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=True, processor_res=512, threshold_a=0.5, threshold_b=0.5, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, union_control_type=<ControlNetUnionControlType.UNKNOWN: 'Unknown'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=-1, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, union_control_type=<ControlNetUnionControlType.UNKNOWN: 'Unknown'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=-1, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, union_control_type=<ControlNetUnionControlType.UNKNOWN: 'Unknown'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), True, None, False, '0,1,2,3', '0,1,2,3', 'inswapper_128.onnx', 'CodeFormer', 0.9, True, 'None', 1, 1, False, True, 1, 0, 0, False, 1, True, False, 'CUDA', True, 0, 'None', '', None, False, False, 0.5, 0, 'tab_single', False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
File "F:\Forge\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
TypeError: 'NoneType' object is not iterable
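The TypeError at the end is presumably just the call queue trying to iterate over the result of a task that already failed, so I assume the RuntimeError above is the real problem. The only workaround I can think of (untested, purely a guess from the traceback, not an official fix) would be to cast t_emb to the dtype of the time_embed weights right before the failing call in the extension's hook.py:

```python
# Hypothetical patch around the failing call in
# extensions\sd-webui-controlnet-main\scripts\hook.py (near line 800);
# untested, just illustrating the idea of matching the input dtype to the weights.
target_dtype = next(self.time_embed.parameters()).dtype
emb = self.time_embed(t_emb.to(dtype=target_dtype))
```

But I'd rather understand why the two dtypes disagree in the first place than paper over it like this.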