You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Hello. This is my first bug report ever :D
Please let me know if this is not relevant or if there are any other issues with the report.
I have recently updated Model Mixer and my routine of playing with Dare merges broke. Here is what I used to do:
Set up Model A to a pony model,
Use GPU, Fast Rebasin
Set Model B to a pony model
Set Dare merge (Lambda = 1.0)
Use MBW - ALL
Enable XYZ:
X: Model Mixer Model B (2-3 models)
Y: Model Mixer MBW Alpha B (For example = "1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1","0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1","0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1","0,1,1,1,1,1,1,1,1,0,1,0.7,1,1,1,1,1,1,1,1","0,1,1,1,1,1,1,1,1,0,1,0.5,1,1,1,1,1,1,1,1","0,1,1,1,1,1,1,1,1,0,1,0.3,1,1,1,1,1,1,1,1","0,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1")
merge processing in 3.0s (prepare: 0.1s, merging: 2.9s).
loading scripts.patches...
lora patch
Textencoder(BASE) has been successfully updated
update UNet block input_blocks.0.
update UNet block input_blocks.1.
update UNet block input_blocks.2.
update UNet block input_blocks.3.
update UNet block input_blocks.4.
update UNet block input_blocks.5.
update UNet block input_blocks.6.
update UNet block input_blocks.7.
update UNet block input_blocks.8.
update UNet block middle_block.
update UNet block output_blocks.0.
update UNet block output_blocks.1.
update UNet block output_blocks.2.
update UNet block output_blocks.3.
update UNet block output_blocks.4.
update UNet block output_blocks.5.
update UNet block output_blocks.6.
update UNet block output_blocks.7.
update UNet block output_blocks.8.
update UNet block time_embed.
update UNet block out.
UNet partial blocks have been successfully updated
Reload full state_dict...
remove old checkpointinfo
WARN: workaround load_model() to fix 'copy out of meta tensor' error (launched without --medvram cmd option case.)
*** Error running before_process: D:\Programs\SD\webui\extensions\sd-webui-model-mixer\scripts\model_mixer.py
Traceback (most recent call last):
File "D:\Programs\SD\webui\modules\scripts.py", line 824, in before_process
script.before_process(p, *script_args)
File "D:\Programs\SD\webui\extensions\sd-webui-model-mixer\scripts\model_mixer.py", line 4816, in before_process
sd_models.unload_model_weights()
File "D:\Programs\SD\webui\modules\sd_models.py", line 1006, in unload_model_weights
send_model_to_cpu(sd_model or shared.sd_model)
File "D:\Programs\SD\webui\modules\sd_models.py", line 743, in send_model_to_cpu
m.to(devices.cpu)
File "D:\Programs\SD\system\python\lib\site-packages\lightning_fabric\utilities\device_dtype_mixin.py", line 54, in to
return super().to(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1160, in to
return self._apply(convert)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 810, in _apply
module._apply(fn)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 810, in _apply
module._apply(fn)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 810, in _apply
module._apply(fn)
[Previous line repeated 1 more time]
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 833, in _apply
param_applied = fn(param)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1158, in convert
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!
generating image for xyz plot: NotImplementedError
Traceback (most recent call last):
File "D:\Programs\SD\webui\scripts\xyz_grid.py", line 728, in cell
res = process_images(pc)
File "D:\Programs\SD\webui\modules\processing.py", line 847, in process_images
res = process_images_inner(p)
File "D:\Programs\SD\webui\modules\processing.py", line 966, in process_images_inner
p.setup_conds()
File "D:\Programs\SD\webui\modules\processing.py", line 1520, in setup_conds
super().setup_conds()
File "D:\Programs\SD\webui\modules\processing.py", line 503, in setup_conds
self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
File "D:\Programs\SD\webui\modules\processing.py", line 488, in get_conds_with_caching
cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
File "D:\Programs\SD\webui\modules\prompt_parser.py", line 261, in get_multicond_learned_conditioning
learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps, hires_steps, use_old_scheduling)
File "D:\Programs\SD\webui\modules\prompt_parser.py", line 188, in get_learned_conditioning
conds = model.get_learned_conditioning(texts)
File "D:\Programs\SD\webui\modules\sd_models_xl.py", line 32, in get_learned_conditioning
c = self.conditioner(sdxl_conds, force_zero_embeddings=['txt'] if force_zero_negative_prompt else [])
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\webui\repositories\generative-models\sgm\modules\encoders\modules.py", line 141, in forward
emb_out = embedder(batch[embedder.input_key])
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\webui\repositories\generative-models\sgm\util.py", line 59, in do_autocast
return f(*args, **kwargs)
File "D:\Programs\SD\webui\repositories\generative-models\sgm\modules\encoders\modules.py", line 391, in forward
outputs = self.transformer(
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 822, in forward
return self.text_model(
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 734, in forward
causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)
File "D:\Programs\SD\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 684, in _make_causal_mask
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
I reinstalled Automatic1111 and Model Mixer, and still got the error.
What helped me to resolve the issue was to roll back to a September version of Model Mixer. Now it all works again.
Hello. This is my first bug report ever :D
Please let me know if this is not relevant or if there are any other issues with the report.
I have recently updated Model Mixer and my routine of playing with Dare merges broke. Here is what I used to do:
Here is the log of where it breaks:
Console Log
debugs = ['elemental merge']
use_extra_elements = True, ?it/s]
XYZ: mbw alpha b -> 0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1
config hash = 552bbe35376b043403c508fac3e8107c7c3173709c786d4ae6e605d31dd7481a
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|███████████████████████████████████████████████████████████████████| 218/218 [00:00<00:00, 851.34it/s]
Check uninitialized #2/3: 100%|██████████████████████████████████████████████████| 218/218 [00:00<00:00, 220062.16it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 252/252 [00:00<00:00, 36100.98it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
Applying attention optimization: Doggettx... done.
100%|██████████████████████████████████████████████████████████████████████████████████| 35/35 [01:33<00:00, 2.66s/it]
debugs = ['elemental merge']█████████████████████████ | 35/70 [01:40<01:34, 2.70s/it]
use_extra_elements = True
XYZ: mbw alpha b -> 0,1,1,1,1,1,1,1,1,0,1,0.7,1,1,1,1,1,1,1,1
config hash = c944033c76af9bc58de90a62672e9b3e703614557f04b788a679caa202e67798
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.7, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|████████████████████████████████████████████████████████████████████| 218/218 [00:05<00:00, 42.56it/s]
Check uninitialized #2/3: 100%|██████████████████████████████████████████████████| 218/218 [00:00<00:00, 218536.87it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 252/252 [00:00<00:00, 31510.74it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
Applying attention optimization: Doggettx... done.
100%|██████████████████████████████████████████████████████████████████████████████████| 35/35 [01:33<00:00, 2.66s/it]
Total progress: 100%|██████████████████████████████████████████████████████████████████| 70/70 [03:38<00:00, 3.13s/it]
X/Y/Z plot will create 16 images on 1 2x2 grid; 4 images per cell. (Total steps to process: 140)03:38<00:00, 2.70s/it]
debugs = ['elemental merge']
use_extra_elements = True, ?it/s]
XYZ: mbw alpha b -> 0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1
config hash = ada4d1f16841761a82b7b8ede66aa354f9008f3c40c0510558ca546910a65bd3
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|█████████████████████████████████████████████████████████████████| 1460/1460 [00:01<00:00, 875.32it/s]
Check uninitialized #2/3: 100%|████████████████████████████████████████████████| 1460/1460 [00:00<00:00, 291451.33it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 252/252 [00:00<00:00, 36012.42it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
Applying attention optimization: Doggettx... done.
100%|██████████████████████████████████████████████████████████████████████████████████| 35/35 [01:34<00:00, 2.71s/it]
debugs = ['elemental merge']████████▎ | 35/140 [01:55<04:48, 2.75s/it]
use_extra_elements = True
XYZ: mbw alpha b -> 0,1,1,1,1,1,1,1,1,0,1,0.7,1,1,1,1,1,1,1,1
config hash = 23f1aeafb1d704786feebdb4c6102b7ff900d8c00f41fd88385ffd87a1539fb7
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.7, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|████████████████████████████████████████████████████████████████████| 218/218 [00:05<00:00, 43.15it/s]
Check uninitialized #2/3: 100%|██████████████████████████████████████████████████████████████| 218/218 [00:00<?, ?it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 252/252 [00:00<00:00, 28157.19it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
Applying attention optimization: Doggettx... done.
100%|██████████████████████████████████████████████████████████████████████████████████| 35/35 [01:35<00:00, 2.72s/it]
debugs = ['elemental merge']████████████████████████▌ | 70/140 [03:45<03:12, 2.75s/it]
use_extra_elements = True
XYZ: mbw alpha b -> 0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1
config hash = ada4d1f16841761a82b7b8ede66aa354f9008f3c40c0510558ca546910a65bd3
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|██████████████████████████████████████████████████████████████████| 218/218 [00:00<00:00, 1028.62it/s]
Check uninitialized #2/3: 100%|██████████████████████████████████████████████████| 218/218 [00:00<00:00, 218641.38it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 252/252 [00:00<00:00, 36099.75it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
Applying attention optimization: Doggettx... done.
100%|██████████████████████████████████████████████████████████████████████████████████| 35/35 [01:35<00:00, 2.72s/it]
debugs = ['elemental merge']████████████████████████████████████████ | 105/140 [05:31<01:36, 2.75s/it]
use_extra_elements = True
XYZ: mbw alpha b -> 0,1,1,1,1,1,1,1,1,0,1,0.7,1,1,1,1,1,1,1,1
config hash = 23f1aeafb1d704786feebdb4c6102b7ff900d8c00f41fd88385ffd87a1539fb7
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.7, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|████████████████████████████████████████████████████████████████████| 218/218 [00:05<00:00, 42.97it/s]
Check uninitialized #2/3: 100%|██████████████████████████████████████████████████| 218/218 [00:00<00:00, 218589.12it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 252/252 [00:00<00:00, 28019.10it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
Applying attention optimization: Doggettx... done.
100%|██████████████████████████████████████████████████████████████████████████████████| 35/35 [01:34<00:00, 2.71s/it]
Total progress: 100%|████████████████████████████████████████████████████████████████| 140/140 [07:35<00:00, 3.25s/it]
X/Y/Z plot will create 84 images on 1 7x3 grid; 4 images per cell. (Total steps to process: 735)07:35<00:00, 2.74s/it]
debugs = ['elemental merge']
use_extra_elements = True, ?it/s]
XYZ: mbw alpha b -> 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
config hash = 70cad8728437980840661c1ed79d7204dbb5d940362e26893a79e862f28009d8
model_a = butapony_v20
sdversion = XL
compact_mode = True
mode = Dare-Fixed, mbw mode, alpha = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Stage #1/3: 100%|█████████████████████████████████████████████████████████████████| 2260/2260 [00:02<00:00, 873.02it/s]
Check uninitialized #2/3: 100%|████████████████████████████████████████████████| 2260/2260 [00:00<00:00, 282411.05it/s]
Save unchanged weights #3/3: 100%|████████████████████████████████████████████████| 255/255 [00:00<00:00, 31988.86it/s]
Broken clip!
[3, 6, 12, 23, 24, 29, 41, 46, 48, 51, 53, 58, 63]
WARN: workaround load_model() to fix 'copy out of meta tensor' error (launched without --medvram cmd option case.)
*** Error running before_process: D:\Programs\SD\webui\extensions\sd-webui-model-mixer\scripts\model_mixer.py
Traceback (most recent call last):
File "D:\Programs\SD\webui\modules\scripts.py", line 824, in before_process
script.before_process(p, *script_args)
File "D:\Programs\SD\webui\extensions\sd-webui-model-mixer\scripts\model_mixer.py", line 4816, in before_process
sd_models.unload_model_weights()
File "D:\Programs\SD\webui\modules\sd_models.py", line 1006, in unload_model_weights
send_model_to_cpu(sd_model or shared.sd_model)
File "D:\Programs\SD\webui\modules\sd_models.py", line 743, in send_model_to_cpu
m.to(devices.cpu)
File "D:\Programs\SD\system\python\lib\site-packages\lightning_fabric\utilities\device_dtype_mixin.py", line 54, in to
return super().to(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1160, in to
return self._apply(convert)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 810, in _apply
module._apply(fn)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 810, in _apply
module._apply(fn)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 810, in _apply
module._apply(fn)
[Previous line repeated 1 more time]
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 833, in _apply
param_applied = fn(param)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1158, in convert
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!
generating image for xyz plot: NotImplementedError
Traceback (most recent call last):
File "D:\Programs\SD\webui\scripts\xyz_grid.py", line 728, in cell
res = process_images(pc)
File "D:\Programs\SD\webui\modules\processing.py", line 847, in process_images
res = process_images_inner(p)
File "D:\Programs\SD\webui\modules\processing.py", line 966, in process_images_inner
p.setup_conds()
File "D:\Programs\SD\webui\modules\processing.py", line 1520, in setup_conds
super().setup_conds()
File "D:\Programs\SD\webui\modules\processing.py", line 503, in setup_conds
self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
File "D:\Programs\SD\webui\modules\processing.py", line 488, in get_conds_with_caching
cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
File "D:\Programs\SD\webui\modules\prompt_parser.py", line 261, in get_multicond_learned_conditioning
learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps, hires_steps, use_old_scheduling)
File "D:\Programs\SD\webui\modules\prompt_parser.py", line 188, in get_learned_conditioning
conds = model.get_learned_conditioning(texts)
File "D:\Programs\SD\webui\modules\sd_models_xl.py", line 32, in get_learned_conditioning
c = self.conditioner(sdxl_conds, force_zero_embeddings=['txt'] if force_zero_negative_prompt else [])
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\webui\repositories\generative-models\sgm\modules\encoders\modules.py", line 141, in forward
emb_out = embedder(batch[embedder.input_key])
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\webui\repositories\generative-models\sgm\util.py", line 59, in do_autocast
return f(*args, **kwargs)
File "D:\Programs\SD\webui\repositories\generative-models\sgm\modules\encoders\modules.py", line 391, in forward
outputs = self.transformer(
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 822, in forward
return self.text_model(
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\Programs\SD\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 734, in forward
causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)
File "D:\Programs\SD\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 684, in _make_causal_mask
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
I reinstalled Automatic1111 and Model Mixer, and still got the error.
What helped me to resolve the issue was to roll back to a September version of Model Mixer. Now it all works again.
Could this be related to this commit: 211e13c#diff-81675046c08690da75a12e021fb5086b96e41641277e8feff350ff94541cb218R4765 ?
The text was updated successfully, but these errors were encountered: