Skip to content

Commit

Permalink
add: auto-translate Chinese prompts to English
Browse files Browse the repository at this point in the history
  • Loading branch information
yolain committed Jun 3, 2024
1 parent ccb17f1 commit 1e9ffc5
Show file tree
Hide file tree
Showing 9 changed files with 316 additions and 8 deletions.
2 changes: 1 addition & 1 deletion README.en.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
- Background removal nodes for the RMBG-1.4 model supporting BriaAI, [BriaAI Guide](https://huggingface.co/briaai/RMBG-1.4)
- Forced cleanup of ComfyUI model VRAM usage is supported
- Stable Diffusion 3 multi-account API nodes are supported
-

## Changelog

**v1.1.8**
Expand Down
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,13 @@
- 支持 强制清理comfyUI模型显存占用
- 支持Stable Diffusion 3 多账号API节点
- 支持IC-Light的应用 [示例参考](https://github.com/yolain/ComfyUI-Yolain-Workflows?tab=readme-ov-file#2-5-ic-light) | [代码整合来源](https://github.com/huchenlei/ComfyUI-IC-Light) | [技术参考](https://github.com/lllyasviel/IC-Light)
- 中文提示词自动识别,使用[opus-mt-zh-en模型](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en)

## 更新日志

**v1.1.8**

- 增加中文提示词自动翻译,使用[opus-mt-zh-en模型](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en), 默认已对wildcard、lora正则处理, 其他需要保留的中文,可使用`@你的提示词@`包裹 (若依赖安装完成后报错, 请重启),测算大约会占0.3GB显存
- 增加 `easy controlnetStack` - controlnet堆
- 增加 `easy applyBrushNet` - [示例参考](https://github.com/yolain/ComfyUI-Yolain-Workflows/blob/main/workflows/2_advanced/2-4inpainting/2-4brushnet_1.1.8.json)
- 增加 `easy applyPowerPaint` - [示例参考](https://github.com/yolain/ComfyUI-Yolain-Workflows/blob/main/workflows/2_advanced/2-4inpainting/2-4powerpaint_outpaint_1.1.8.json)
Expand Down
16 changes: 16 additions & 0 deletions install.bat
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
@echo off
REM Install ComfyUI-Easy-Use requirements.
REM Prefers the ComfyUI portable embedded Python when present,
REM otherwise falls back to the Python on PATH.

REM %~dp0 already ends with a backslash, so no extra separator is needed.
set "requirements_txt=%~dp0requirements.txt"
set "python_exec=..\..\..\python_embeded\python.exe"

echo Installing EasyUse Requirements...

if exist "%python_exec%" (
    echo Installing with ComfyUI Portable
    "%python_exec%" -s -m pip install -r "%requirements_txt%"
) else (
    echo Installing with system Python
    REM "python -m pip" guarantees pip matches the interpreter on PATH.
    python -m pip install -r "%requirements_txt%"
)

pause
1 change: 1 addition & 0 deletions prestartup_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions):
add_folder_path_and_extensions("dynamicrafter_models", [os.path.join(model_path, "dynamicrafter_models")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("mediapipe", [os.path.join(model_path, "mediapipe")], set(['.tflite','.pth']))
add_folder_path_and_extensions("inpaint", [os.path.join(model_path, "inpaint")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("prompt_generator", [os.path.join(model_path, "prompt_generator")], folder_paths.supported_pt_extensions)

add_folder_path_and_extensions("checkpoints_thumb", [os.path.join(model_path, "checkpoints")], image_suffixs)
add_folder_path_and_extensions("loras_thumb", [os.path.join(model_path, "loras")], image_suffixs)
10 changes: 10 additions & 0 deletions py/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from .libs.model import easyModelManager
from .libs.utils import getMetadata, cleanGPUUsedForce, get_local_filepath
from .libs.cache import remove_cache
from .libs.translate import has_chinese, zh_to_en

try:
import aiohttp
Expand All @@ -30,6 +31,15 @@ def cleanGPU(request):
return web.Response(status=500)
pass

@PromptServer.instance.routes.post("/easyuse/translate")
async def translate(request):
    """Translate a Chinese prompt to English via the zh->en model.

    Expects form data with a "text" field. Responds with JSON
    {"text": ...}: the translation when the text contains Chinese
    characters, otherwise the original text unchanged.
    """
    post = await request.post()
    text = post.get("text")
    # Guard against a missing/empty "text" field before probing for Chinese,
    # so has_chinese is never called with None.
    if text and has_chinese(text):
        return web.json_response({"text": zh_to_en([text])[0]})
    return web.json_response({"text": text})

@PromptServer.instance.routes.get("/easyuse/reboot")
def reboot(request):
try:
Expand Down
46 changes: 40 additions & 6 deletions py/easyNodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
from .libs.controlnet import easyControlnet
from .libs.conditioning import prompt_to_cond, set_cond
from .libs.easing import EasingBase
from .libs.translate import has_chinese, zh_to_en
from .libs import cache as backend_cache

sampler = easySampler()
Expand All @@ -57,6 +58,8 @@ def INPUT_TYPES(s):

@staticmethod
def main(positive):
    """Return the positive prompt, translated to English when it contains Chinese.

    Both branches return a 1-tuple, so the node's output shape is the same
    regardless of whether translation happened (the original returned a list
    from zh_to_en on one path and a tuple on the other).
    """
    if has_chinese(positive):
        # zh_to_en takes and returns a list of strings; unwrap the single result.
        return (zh_to_en([positive])[0],)
    return (positive,)

# 通配符提示词
Expand Down Expand Up @@ -86,8 +89,13 @@ def INPUT_TYPES(s):

CATEGORY = "EasyUse/Prompt"

@staticmethod
def main(*args, **kwargs):
def translate(self, text):
    """Return *text* translated to English when it contains Chinese characters,
    otherwise return it unchanged."""
    return zh_to_en([text])[0] if has_chinese(text) else text

def main(self, *args, **kwargs):
prompt = kwargs["prompt"] if "prompt" in kwargs else None
seed = kwargs["seed"]

Expand All @@ -98,10 +106,15 @@ def main(*args, **kwargs):
text = kwargs['text']
if "multiline_mode" in kwargs and kwargs["multiline_mode"]:
populated_text = []
_text = []
text = text.split("\n")
for t in text:
t = self.translate(t)
_text.append(t)
populated_text.append(process(t, seed))
text = _text
else:
text = self.translate(text)
populated_text = [process(text, seed)]
text = [text]
return {"ui": {"value": [seed]}, "result": (text, populated_text)}
Expand All @@ -126,7 +139,10 @@ def INPUT_TYPES(s):

@staticmethod
def main(negative):
    """Return the negative prompt, translated to English when it contains Chinese.

    Both branches return a 1-tuple, so the node's output shape is the same
    regardless of whether translation happened (the original returned a list
    from zh_to_en on one path and a tuple on the other).
    """
    if has_chinese(negative):
        # zh_to_en takes and returns a list of strings; unwrap the single result.
        return (zh_to_en([negative])[0],)
    return (negative,)

# 风格提示词选择器
class stylesPromptSelector:
Expand Down Expand Up @@ -262,6 +278,8 @@ def INPUT_TYPES(s):
CATEGORY = "EasyUse/Prompt"

def doit(self, prompt, main, lighting):
if has_chinese(prompt):
prompt = zh_to_en([prompt])[0]
if lighting != 'none' and main != 'none':
prompt = main + ',' + lighting + ',' + prompt
elif lighting != 'none' and main == 'none':
Expand Down Expand Up @@ -305,6 +323,8 @@ def run(self, **kwargs):

# Only process string input ports.
if isinstance(v, str) and v != '':
if has_chinese(v):
v = zh_to_en([v])[0]
prompts.append(v)

return (prompts, prompts)
Expand Down Expand Up @@ -332,14 +352,14 @@ def INPUT_TYPES(s):

def generate_strings(self, prompt, start_index, max_rows, workflow_prompt=None, my_unique_id=None):
    """Split *prompt* into lines and return the selected window of rows.

    Lines containing Chinese are translated to English. Translation runs
    AFTER the window is sliced, so only the rows actually returned are fed
    to the (expensive) translation model — the original translated every
    line of the prompt first, with identical output.

    Returns the same list twice, matching the node's two string outputs.
    """
    lines = prompt.split('\n')

    # Clamp the window to the available lines.
    start_index = max(0, min(start_index, len(lines) - 1))
    end_index = min(start_index + max_rows, len(lines))

    rows = lines[start_index:end_index]
    # Translate only the visible slice.
    rows = [zh_to_en([v])[0] if has_chinese(v) else v for v in rows]

    return (rows, rows)

class promptConcat:
Expand Down Expand Up @@ -906,11 +926,11 @@ def INPUT_TYPES(cls):
"empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),

"positive": ("STRING", {"default":"", "placeholder": "Positive", "multiline": True}),
"positive": ("STRING", {"default": "", "placeholder": "Positive", "multiline": True}),
"positive_token_normalization": (["none", "mean", "length", "length+mean"],),
"positive_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),

"negative": ("STRING", {"default":"", "placeholder": "Negative", "multiline": True}),
"negative": ("STRING", {"default": "", "placeholder": "Negative", "multiline": True}),
"negative_token_normalization": (["none", "mean", "length", "length+mean"],),
"negative_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),

Expand Down Expand Up @@ -1197,6 +1217,9 @@ def adv_pipeloader(self, stage_c, stage_b, stage_a, clip_name, lora_name, lora_m

log_node_warn("正在处理提示词...")
positive_seed = find_wildcards_seed(my_unique_id, positive, prompt)
# Translate cn to en
if has_chinese(positive):
positive = zh_to_en([positive])[0]
model_c, clip, positive, positive_decode, show_positive_prompt, pipe_lora_stack = process_with_loras(positive,
model_c, clip,
"positive",
Expand All @@ -1206,6 +1229,9 @@ def adv_pipeloader(self, stage_c, stage_b, stage_a, clip_name, lora_name, lora_m
easyCache)
positive_wildcard_prompt = positive_decode if show_positive_prompt or is_positive_linked_styles_selector else ""
negative_seed = find_wildcards_seed(my_unique_id, negative, prompt)
# Translate cn to en
if has_chinese(negative):
negative = zh_to_en([negative])[0]
model_c, clip, negative, negative_decode, show_negative_prompt, pipe_lora_stack = process_with_loras(negative,
model_c, clip,
"negative",
Expand Down Expand Up @@ -1572,11 +1598,15 @@ def adv_pipeloader(self, ckpt_name, vae_name, clip_name, init_image, resolution,
if clip_name == 'None':
raise Exception("You need choose a open_clip model when positive is not empty")
clip = easyCache.load_clip(clip_name)
if has_chinese(optional_positive):
optional_positive = zh_to_en([optional_positive])[0]
positive_embeddings_final, = CLIPTextEncode().encode(clip, optional_positive)
positive, = ConditioningConcat().concat(positive, positive_embeddings_final)
if optional_negative is not None and optional_negative != '':
if clip_name == 'None':
raise Exception("You need choose a open_clip model when negative is not empty")
if has_chinese(optional_negative):
optional_positive = zh_to_en([optional_negative])[0]
negative_embeddings_final, = CLIPTextEncode().encode(clip, optional_negative)
negative, = ConditioningConcat().concat(negative, negative_embeddings_final)

Expand Down Expand Up @@ -1741,8 +1771,12 @@ def adv_pipeloader(self, model_name, clip_skip, init_image, resolution, empty_la
clipped.clip_layer(clip_skip)

if positive is not None and positive != '':
if has_chinese(positive):
positive = zh_to_en([positive])[0]
positive_embeddings_final, = CLIPTextEncode().encode(clipped, positive)
if negative is not None and negative != '':
if has_chinese(negative):
negative = zh_to_en([negative])[0]
negative_embeddings_final, = CLIPTextEncode().encode(clipped, negative)

image = easySampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0)))
Expand Down
6 changes: 6 additions & 0 deletions py/libs/conditioning.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from .utils import find_wildcards_seed, find_nearest_steps, is_linked_styles_selector
from .log import log_node_warn
from .translate import zh_to_en, has_chinese
from .wildcards import process_with_loras
from .adv_encode import advanced_encode

Expand All @@ -9,6 +10,11 @@ def prompt_to_cond(type, model, clip, clip_skip, lora_stack, text, prompt_token_
styles_selector = is_linked_styles_selector(prompt, my_unique_id, type)
title = "正面提示词" if type == 'positive' else "负面提示词"
log_node_warn("正在进行" + title + "...")

# Translate cn to en
if has_chinese(text):
text = zh_to_en([text])[0]

positive_seed = find_wildcards_seed(my_unique_id, text, prompt)
model, clip, text, cond_decode, show_prompt, pipe_lora_stack = process_with_loras(
text, model, clip, type, positive_seed, can_load_lora, lora_stack, easyCache)
Expand Down
Loading

0 comments on commit 1e9ffc5

Please sign in to comment.