Skip to content

Commit

Permalink
0.35.1
Browse files Browse the repository at this point in the history
  • Loading branch information
shadowcz007 committed Aug 4, 2024
1 parent fe57286 commit 05b3088
Show file tree
Hide file tree
Showing 9 changed files with 541 additions and 388 deletions.
9 changes: 6 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
![](https://img.shields.io/github/release/shadowcz007/comfyui-mixlab-nodes)

> 适配了最新版 comfyui 的 py3.11 ,torch 2.1.2+cu121
> 适配了最新版 comfyui 的 py3.11 ,torch 2.3.1+cu121
> [Mixlab nodes discord](https://discord.gg/cXs9vZSqeK)

Expand All @@ -10,6 +10,8 @@

![alt text](./assets/1722517810720.png)

- 增加 API Key Input 节点,用于管理LLM的Key,同时优化LLM相关节点,为后续agent模式做准备

- 增加 SiliconflowLLM,可以使用由Siliconflow提供的免费LLM

- 增加 Edit Mask,方便在生成的时候手动绘制 mask [workflow](./workflow/edit-mask-workflow.json)
Expand Down Expand Up @@ -116,9 +118,10 @@ https://github.com/shadowcz007/comfyui-mixlab-nodes/assets/12645064/e7e77f90-e43

> Support for calling multiple GPTs. Local LLM(llama.cpp)、 ChatGPT、ChatGLM3 、ChatGLM4 , Some code provided by rui. If you are using OpenAI's service, fill in https://api.openai.com/v1 . If you are using a local LLM service, fill in http://127.0.0.1:xxxx/v1 . Azure OpenAI:https://xxxx.openai.azure.com
![gpt-workflow.svg](./assets/gpt-workflow.svg)
[LLM_base_workflow](./workflow/LLM_base_workflow.json)

[workflow-5](./workflow/5-gpt-workflow.json)
- SiliconflowLLM
- ChatGPTOpenAI

<!-- 最新:ChatGPT 节点支持 Local LLM(llama.cpp),Phi3、llama3 都可以直接一个节点运行了。
Expand Down
1 change: 1 addition & 0 deletions __init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1106,6 +1106,7 @@ def mix_status(request):

"Color":"Color Input ♾️MixlabApp",
"TextInput_":"Text Input ♾️MixlabApp",
"KeyInput":"API Key Input ♾️MixlabApp",
"FloatSlider":"Float Slider Input ♾️MixlabApp",
"IntNumber":"Int Input ♾️MixlabApp",
"ImagesPrompt_":"Images Input ♾️MixlabApp",
Expand Down
95 changes: 76 additions & 19 deletions nodes/ChatGPT.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,36 @@ def chat(client, model_name,messages ):
return content


# Preset OpenAI-compatible LLM API endpoints offered in the node's dropdown.
# Each entry pairs a human-readable provider "label" with its base "value" URL.
llm_apis = [
    {"value": "https://api.openai.com/v1", "label": "openai"},
    {"value": "https://openai.api2d.net/v1", "label": "api2d"},
    # {
    #     "value": "https://docs-test-001.openai.azure.com",
    #     "label": "https://docs-test-001.openai.azure.com"
    # },
    {"value": "https://api.moonshot.cn/v1", "label": "Kimi"},
    {"value": "https://api.deepseek.com/v1", "label": "DeepSeek-V2"},
    {"value": "https://api.siliconflow.cn/v1", "label": "SiliconCloud"},
]

# Reverse lookup used to resolve the user's dropdown choice: label -> base URL.
llm_apis_dict = dict((api["label"], api["value"]) for api in llm_apis)


class ChatGPTNode:
def __init__(self):
# self.__client = OpenAI()
Expand All @@ -216,8 +246,9 @@ def __init__(self):

@classmethod
def INPUT_TYPES(cls):

model_list=[
"gpt-3.5-turbo",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4o",
"gpt-4o-2024-05-13",
Expand All @@ -243,25 +274,32 @@ def INPUT_TYPES(cls):
"01-ai/Yi-1.5-9B-Chat-16K",
"meta-llama/Meta-Llama-3.1-8B-Instruct"
]

return {
"required": {
"api_key":("KEY", {"default": "", "multiline": True,"dynamicPrompts": False}),
"api_url":("URL", {"default": "", "multiline": True,"dynamicPrompts": False}),
# "api_key":("KEY", {"default": "", "multiline": True,"dynamicPrompts": False}),
# "api_key":("STRING", {"forceInput": True,}),

"prompt": ("STRING", {"multiline": True,"dynamicPrompts": False}),
"system_content": ("STRING",
{
"default": "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.",
"multiline": True,"dynamicPrompts": False
}),

"model": ( model_list,
{"default": model_list[0]}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "step": 1}),
"context_size":("INT", {"default": 1, "min": 0, "max":30, "step": 1}),
"api_url":(list(llm_apis_dict.keys()),
{"default": list(llm_apis_dict.keys())[0]}),
},
"hidden": {
"unique_id": "UNIQUE_ID",
"extra_pnginfo": "EXTRA_PNGINFO",
},
"optional":{
"api_key":("STRING", {"forceInput": True,}),
"custom_model_name":("STRING", {"forceInput": True,}), #适合自定义model
"custom_api_url":("STRING", {"forceInput": True,}), #适合自定义model
},

}

RETURN_TYPES = ("STRING","STRING","STRING",)
Expand All @@ -273,12 +311,29 @@ def INPUT_TYPES(cls):


def generate_contextual_text(self,
api_key,
api_url,
# api_key,
prompt,
system_content,
model,
seed,context_size,unique_id = None, extra_pnginfo=None):
model,
seed,
context_size,
api_url,
api_key=None,
custom_model_name=None,
custom_api_url=None,
):

if custom_model_name!=None:
model=custom_model_name

api_url=llm_apis_dict[api_url] if api_url in llm_apis_dict else ""

if custom_api_url!=None:
api_url=custom_api_url

if api_key==None:
api_key="lm_studio"

# print(api_key!='',api_url,prompt,system_content,model,seed)
# 可以选择保留会话历史以维持上下文记忆
# 或者在此处清除会话历史 self.session_history.clear()
Expand All @@ -291,7 +346,7 @@ def generate_contextual_text(self,
self.system_content=system_content
# self.session_history=[]
# self.session_history.append({"role": "system", "content": system_content})

print("api_key,api_url",api_key,api_url)
#
if is_azure_url(api_url):
client=azure_client(api_key,api_url)
Expand Down Expand Up @@ -359,7 +414,7 @@ def INPUT_TYPES(cls):
]
return {
"required": {
"api_key":("KEY", {"default": "", "multiline": True,"dynamicPrompts": False}),
"api_key":("STRING", {"forceInput": True,}),
"prompt": ("STRING", {"multiline": True,"dynamicPrompts": False}),
"system_content": ("STRING",
{
Expand All @@ -371,10 +426,9 @@ def INPUT_TYPES(cls):
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "step": 1}),
"context_size":("INT", {"default": 1, "min": 0, "max":30, "step": 1}),
},
"hidden": {
"unique_id": "UNIQUE_ID",
"extra_pnginfo": "EXTRA_PNGINFO",
},
"optional":{
"custom_model_name":("STRING", {"forceInput": True,}), #适合自定义model
},
}

RETURN_TYPES = ("STRING","STRING","STRING",)
Expand All @@ -390,8 +444,11 @@ def generate_contextual_text(self,
prompt,
system_content,
model,
seed,context_size,unique_id = None, extra_pnginfo=None):

seed,context_size,custom_model_name=None):

if custom_model_name!=None:
model=custom_model_name

api_url="https://api.siliconflow.cn/v1"

# 把系统信息和初始信息添加到会话历史中
Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
[project]
name = "comfyui-mixlab-nodes"
description = "3D, ScreenShareNode & FloatingVideoNode, SpeechRecognition & SpeechSynthesis, GPT, LoadImagesFromLocal, Layers, Other Nodes, ..."
version = "0.34.0"
license = { text = "MIT License" }
version = "0.35.1"
license = "MIT"
dependencies = ["numpy", "pyOpenSSL", "watchdog", "opencv-python-headless", "matplotlib", "openai", "simple-lama-inpainting", "clip-interrogator==0.6.0", "transformers>=4.36.0", "lark-parser", "imageio-ffmpeg", "rembg[gpu]", "omegaconf==2.3.0", "Pillow>=9.5.0", "einops==0.7.0", "trimesh>=4.0.5", "huggingface-hub", "scikit-image"]

[project.urls]
Expand Down
2 changes: 1 addition & 1 deletion web/javascript/checkVersion_mixlab.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ import { app } from '../../../scripts/app.js'
const repoOwner = 'shadowcz007' // 替换为仓库的所有者
const repoName = 'comfyui-mixlab-nodes' // 替换为仓库的名称

const version = 'v0.34.0'
const version = 'v0.35.1'

fetch(`https://api.github.com/repos/${repoOwner}/${repoName}/releases/latest`)
.then(response => response.json())
Expand Down
Loading

0 comments on commit 05b3088

Please sign in to comment.