This extension is no longer needed, as my pull request adding the same functionality was accepted on the dev branch of stable-diffusion-webui. Thanks for the kind words, though. Here is the merged pull request in case you want to merge it into your local repo and try it out: AUTOMATIC1111/stable-diffusion-webui#15992
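If you want to try the change before it lands in a release, GitHub exposes every pull request as a fetchable ref, so you can pull it into a local branch with git fetch origin pull/15992/head:sdxl-clip-skip (the branch name here is just an example) and then merge that branch into your checkout.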
import math
from collections import namedtuple
import torch
from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches
from modules import prompt_parser, sd_hijack, sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18, sd_emphasis
from modules.script_callbacks import on_ui_settings
from modules.sd_hijack import EmbeddingsWithFixes, model_hijack, apply_weighted_forward, undo_weighted_forward, weighted_forward
from modules.sd_hijack_clip import FrozenCLIPEmbedderWithCustomWords, FrozenCLIPEmbedderWithCustomWordsBase, PromptChunk
from modules.shared import opts
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
import ldm.modules.diffusionmodules.openaimodel
import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
import ldm.modules.encoders.modules
import ldm.util
import sgm.modules.attention
import sgm.modules.diffusionmodules.model
import sgm.modules.diffusionmodules.openaimodel
import sgm.modules.encoders.modules
attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
# new memory efficient cross attention blocks do not support hypernets and we already
# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
# silence new console spam from SD2
ldm.modules.attention.print = shared.ldm_print
ldm.modules.diffusionmodules.model.print = shared.ldm_print
ldm.util.print = shared.ldm_print
ldm.models.diffusion.ddpm.print = shared.ldm_print
optimizers = []
current_optimizer: sd_hijack_optimizations.SdOptimization = None
ldm_patched_forward = sd_unet.create_unet_forward(ldm.modules.diffusionmodules.openaimodel.UNetModel.forward)
ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", ldm_patched_forward)
sgm_patched_forward = sd_unet.create_unet_forward(sgm.modules.diffusionmodules.openaimodel.UNetModel.forward)
sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sgm_patched_forward)
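patches.patch swaps in the new forward and hands back the original callable, which is why ldm_original_forward and sgm_original_forward are kept around. If the stock behaviour ever needs to be restored, the same key reverts the patch; a minimal sketch, assuming the undo helper from modules/patches.py:

# sketch: revert the UNet forward patches registered under this module's key
patches.undo(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward")
patches.undo(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward")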
OPT_NAME = "enable_xl_clip_skip"
def ext_on_ui_settings():
    # [setting_name], [default], [label], [component (blank is checkbox)], [component_args]
    xlclipskip_options = [
        (OPT_NAME, False, "Enable the option to set the clip skip setting for the small CLIP model in SDXL")
    ]
    section = ('Enable clip-skip in SDXL', "Enable clip-skip in SDXL")
    for cur_setting_name, *option_info in xlclipskip_options:
        shared.opts.add_option(cur_setting_name, shared.OptionInfo(*option_info, section=section))

on_ui_settings(ext_on_ui_settings)
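Once registered, the checkbox shows up under its own "Enable clip-skip in SDXL" section of the settings page, and the value can be read back at runtime as shared.opts.enable_xl_clip_skip (the OPT_NAME defined above).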
def hijack(self, m):
    conditioner = getattr(m, 'conditioner', None)
    if conditioner:
        # collect the wrapped text-conditioning models of the SDXL conditioner
        text_cond_models = []
def undo_hijack(self, m):
    conditioner = getattr(m, 'conditioner', None)
    if conditioner:
        # unwrap the token embeddings and restore the original embedders
        for i in range(len(conditioner.embedders)):
            embedder = conditioner.embedders[i]
            if isinstance(embedder, (sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords, sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords)):
                embedder.wrapped.model.token_embedding = embedder.wrapped.model.token_embedding.wrapped
                conditioner.embedders[i] = embedder.wrapped
            if isinstance(embedder, sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):
                embedder.wrapped.transformer.text_model.embeddings.token_embedding = embedder.wrapped.transformer.text_model.embeddings.token_embedding.wrapped
                conditioner.embedders[i] = embedder.wrapped
class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)
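The encode_with_transformers override that actually applies clip skip is not shown in this excerpt. A minimal sketch of what it plausibly looks like, assuming the usual webui conventions: opts.CLIP_stop_at_last_layers holds the clip skip value, the OPT_NAME checkbox defined above gates the behaviour, and hidden states are requested from the transformer so an earlier layer can be selected:

    def encode_with_transformers(self, tokens):
        # ask for hidden states so an earlier layer can be picked when clip skip is active
        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=True)

        if getattr(opts, OPT_NAME, False) and opts.CLIP_stop_at_last_layers > 1:
            # count layers from the end, mirroring how clip skip works for SD1.x models
            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
        elif self.wrapped.layer == "last":
            z = outputs.last_hidden_state
        else:
            z = outputs.hidden_states[self.wrapped.layer_idx]

        return z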
# Update the assignment to use the method from the class
sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords.encode_with_transformers = FrozenCLIPEmbedderForSDXLWithCustomWords.encode_with_transformers
sd_hijack.StableDiffusionModelHijack.hijack = hijack
sd_hijack.StableDiffusionModelHijack.undo_hijack = undo_hijack
# Define the undo_optimizations function as a placeholder
def undo_optimizations():
    pass
You do have to disable it in the settings if you want to change models, but it works.