diff --git a/.github/workflows/package_wheels.yml b/.github/workflows/package_wheels.yml index d2a9cd1..4f7eba5 100644 --- a/.github/workflows/package_wheels.yml +++ b/.github/workflows/package_wheels.yml @@ -35,9 +35,7 @@ jobs: - name: 📦 Building and Bundling wheels shell: bash run: | - python -m pip wheel --no-cache-dir -r requirements-wheels.txt -w ./wheels > build.log - - cat build.log + python -m pip wheel --no-cache-dir --no-deps -r requirements-wheels.txt -w ./wheels 2>&1 | tee build.log # find source wheels packages=$(cat build.log | awk -F 'Building wheels for collected packages: ' '{print $2}') diff --git a/.gitignore b/.gitignore index 80053fe..d5bec60 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ __pycache__ *.py[cod] *.onnx +wheels/ node_modules/ \ No newline at end of file diff --git a/INSTALL-CN.md b/INSTALL-CN.md new file mode 100644 index 0000000..934e540 --- /dev/null +++ b/INSTALL-CN.md @@ -0,0 +1,93 @@ +# 安装 +- [安装](#安装) + - [自动安装(推荐)](#自动安装推荐) + - [ComfyUI 管理器](#comfyui-管理器) + - [虚拟环境](#虚拟环境) + - [模型下载](#模型下载) + - [网络扩展](#网络扩展) + - [旧的安装方法 (MANUAL)](#旧的安装方法-manual) + - [依赖关系](#依赖关系) +### 自动安装(推荐) + +### ComfyUI 管理器 + +从 0.1.0 版开始,该扩展将使用 [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager) 进行安装,这对处理各种环境下的各种安装问题大有帮助。 + +### 虚拟环境 +还有一种试验性的单行安装方法,即在 ComfyUI 根目录下使用以下命令进行安装。它将下载代码、安装依赖项并运行安装脚本: + + +```bash +curl -sSL "https://raw.githubusercontent.com/username/repo/main/install.py" | python3 - +``` + +## 模型下载 +某些节点需要下载额外的模型,您可以使用与上述相同的 python 环境以交互方式完成下载: + +```bash +python scripts/download_models.py +``` + +然后根据提示或直接按回车键下载每个模型。 + +> **Note** +> 您可以使用以下方法下载所有型号,无需提示: + ```bash + python scripts/download_models.py -y + ``` + +#### 网络扩展 + +首次运行时,脚本会尝试将 [网络扩展](https://github.com/melMass/comfy_mtb/tree/main/web)链接到你的 "web/extensions "文件夹,[请参阅](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61)。 + +color widget preview + +### 旧的安装方法 (MANUAL) +### 依赖关系 +

Custom Virtualenv(我主要用这个)

+ +

Comfy 便携式/单机版(来自 ComfyUI 版本)

+ +如果您使用 ComfyUI 单机版自带的 `python-embeded`,那么当某些二进制依赖没有提供预编译轮子(wheel)时,就无法直接用 pip 安装它们。这种情况下,请查看最新的 [发布](https://github.com/melMass/comfy_mtb/releases):其中为 linux 和 windows 提供了包含预编译轮子的捆绑包(只有那些需要从源代码编译的轮子)。更多信息请参阅 [此问题 (#1)](https://github.com/melMass/comfy_mtb/issues/1)。 +![image](https://github.com/melMass/comfy_mtb/assets/7041726/2934fa14-3725-427c-8b9e-2b4f60ba1b7b) + + +
+ +

Google Colab

+ +在 **Run ComfyUI with localtunnel (Recommended Way)** 标题之后(代码单元格之前)添加一个新的代码单元格 + +![preview of where to add it on colab](https://github.com/melMass/comfy_mtb/assets/7041726/35df2ef1-14f9-44cd-aa65-353829188cd7) + + +```python +# download the nodes +!git clone --recursive https://github.com/melMass/comfy_mtb.git custom_nodes/comfy_mtb + +# download all models +!python custom_nodes/comfy_mtb/scripts/download_models.py -y + +# install the dependencies +!pip install -r custom_nodes/comfy_mtb/requirements.txt -f https://download.openmmlab.com/mmcv/dist/cu118/torch2.0/index.html +``` + +如果运行后 colab 抱怨需要重新启动运行时,请重新启动,然后不要重新运行之前的单元格,只运行运行本地隧道的单元格。(可能需要先添加一个包含 `%cd ComfyUI` 的单元格) + + +> **Note**: +> If you don't need all models, remove the `-y` as collab actually supports user input: ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06) + +> **Preview** +> ![image](https://github.com/melMass/comfy_mtb/assets/7041726/b5b2b2d9-f1e8-4c43-b1db-7dfc5e07be86) + +
+ diff --git a/INSTALL-JP.md b/INSTALL-JP.md new file mode 100644 index 0000000..f06dc6f --- /dev/null +++ b/INSTALL-JP.md @@ -0,0 +1,93 @@ +# インストール + +- [インストール](#インストール) + - [自動インストール (推奨)](#自動インストール-推奨) + - [ComfyUI マネージャ](#comfyui-マネージャ) + - [仮想環境](#仮想環境) + - [モデルのダウンロード](#モデルのダウンロード) + - [ウェブ拡張機能](#ウェブ拡張機能) + - [旧インストール方法 (MANUAL)](#旧インストール方法-manual) + - [依存関係](#依存関係) + + +## 自動インストール (推奨) + +### ComfyUI マネージャ + +バージョン0.1.0では、この拡張機能は[ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager)と一緒にインストールすることを想定しています。これは、様々な環境で直面する様々なインストール問題を処理するのに非常に役立ちます。 + +### 仮想環境 +また、ComfyUIのルートから以下のコマンドを使用する実験的なワンライナー・インストールもあります。これはコードをダウンロードし、依存関係をインストールし、インストールスクリプトを実行します: + +```bash +curl -sSL "https://raw.githubusercontent.com/username/repo/main/install.py" | python3 - +``` + +## モデルのダウンロード +ノードによっては、追加モデルのダウンロードが必要な場合があるので、上記と同じ python 環境を使って対話的に行うことができる: +```bash +python scripts/download_models.py +``` + +プロンプトに従うか、Enterを押すだけで全てのモデルをダウンロードできます。 + + +> **Note** +> プロンプトを出さずに全てのモデルをダウンロードするには、以下のようにします: + ```bash + python scripts/download_models.py -y + ``` + +### ウェブ拡張機能 + +初回実行時にスクリプトは[web extensions](https://github.com/melMass/comfy_mtb/tree/main/web)をあなたの快適な `web/extensions` フォルダに[シンボリックリンク](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61)しようとします。万が一失敗した場合は、mtbフォルダを手動で`ComfyUI/web/extensions`にコピーしてください: + +color widget preview + +## 旧インストール方法 (MANUAL) +### 依存関係 + +

カスタム Virtualenv (私は主にこれを使っています)

+ +1. ComfyUIで使用しているPython環境であることを確認してください。 +2. 以下のコマンドを実行して、必要な依存関係をインストールします: + ```bash + pip install -r comfy_mtb/requirements.txt + ``` + +
+ +

Comfy-portable / standalone (ComfyUI リリースより)

+ +ComfyUIスタンドアロンの`python-embeded`を使用している場合、ホイールが提供されていないバイナリ依存関係はpipでインストールできません。この場合は、最新の[リリース](https://github.com/melMass/comfy_mtb/releases)をチェックしてください。linuxとwindows用に、ビルド済みホイールを含むバンドルがあります(ソースからのビルドが必要なもののみ)。詳細は[この問題(#1)](https://github.com/melMass/comfy_mtb/issues/1)を参照してください。 + +![image](https://github.com/melMass/comfy_mtb/assets/7041726/2934fa14-3725-427c-8b9e-2b4f60ba1b7b) + +
+ +

Google Colab

+ +ComfyUI with localtunnel (Recommended Way)**ヘッダーのすぐ後(コードセルの前)に、新しいコードセルを追加してください。 +![colabに追加する場所のプレビュー](https://github.com/melMass/comfy_mtb/assets/7041726/35df2ef1-14f9-44cd-aa65-353829188cd7) + +```python +# download the nodes +!git clone --recursive https://github.com/melMass/comfy_mtb.git custom_nodes/comfy_mtb + +# download all models +!python custom_nodes/comfy_mtb/scripts/download_models.py -y + +# install the dependencies +!pip install -r custom_nodes/comfy_mtb/requirements.txt -f https://download.openmmlab.com/mmcv/dist/cu118/torch2.0/index.html +``` +これを実行した後、colabがランタイムを再起動する必要があると文句を言ったら、それを実行し、それ以前のセルは再実行せず、localtunnelを実行するセルだけを再実行してください。(最初に`%cd ComfyUI`のセルを追加する必要があるかもしれません...) + + +> **Note**: +> すべてのモデルが必要でない場合は、`-y`を削除してください : ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06) + +> **プレビュー** +> ![image](https://github.com/melMass/comfy_mtb/assets/7041726/b5b2b2d9-f1e8-4c43-b1db-7dfc5e07be86) + +
+ diff --git a/INSTALL.md b/INSTALL.md index 7d37e56..2bf392d 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,29 +1,68 @@ # Installation - [Installation](#installation) - - [Dependencies](#dependencies) - - [Custom virtualenv (I use this mainly)](#custom-virtualenv-i-use-this-mainly) - - [Comfy-portable / standalone (from ComfyUI releases)](#comfy-portable--standalone-from-comfyui-releases) - - [Google Colab](#google-colab) - - [Models Download](#models-download) + - [Automatic Install (Recommended)](#automatic-install-recommended) + - [ComfyUI Manager](#comfyui-manager) + - [Virtual Env](#virtual-env) + - [Models Download](#models-download) - [Web Extensions](#web-extensions) + - [Old installation method (MANUAL)](#old-installation-method-manual) + - [Dependencies](#dependencies) +## Automatic Install (Recommended) -### Dependencies +### ComfyUI Manager + +As of version 0.1.0, this extension is meant to be installed with the [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager), which helps a lot with handling the various install issues faced by various environments. -#### Custom virtualenv (I use this mainly) +### Virtual Env +There is also an experimental one liner install using the following command from ComfyUI's root. It will download the code, install the dependencies and run the install script: + +```bash +curl -sSL "https://raw.githubusercontent.com/username/repo/main/install.py" | python3 - +``` +## Models Download +Some nodes require extra models to be downloaded, you can interactively do it using the same python environment as above: +```bash +python scripts/download_models.py +``` + +then follow the prompt or just press enter to download every models. + +> **Note** +> You can use the following to download all models without prompt: + ```bash + python scripts/download_models.py -y + ``` + +### Web Extensions + +On first run the script [tries to symlink](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61) the [web extensions](https://github.com/melMass/comfy_mtb/tree/main/web) to your comfy `web/extensions` folder. In case it fails you can manually copy the mtb folder to `ComfyUI/web/extensions` it only provides a color widget for now shared by a few nodes: + +color widget preview + +## Old installation method (MANUAL) +### Dependencies +

Custom Virtualenv (I use this mainly)

+ 1. Make sure you are in the Python environment you use for ComfyUI. 2. Install the required dependencies by running the following command: ```bash pip install -r comfy_mtb/requirements.txt ``` -#### Comfy-portable / standalone (from ComfyUI releases) +
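+
+For reference, a minimal sketch of those two steps, assuming you are in the ComfyUI root, the virtualenv lives in `venv/`, and this repo was cloned to `custom_nodes/comfy_mtb` (adjust the paths to your setup):
+
+```bash
+# activate the virtualenv ComfyUI runs with (use venv\Scripts\activate on Windows)
+source venv/bin/activate
+
+# install the node's dependencies into that environment
+pip install -r custom_nodes/comfy_mtb/requirements.txt
+```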
+ +

Comfy-portable / standalone (from ComfyUI releases)

+ If you use the `python-embeded` from ComfyUI standalone then you are not able to pip install dependencies with binaries when they don't have wheels, in this case check the last [release](https://github.com/melMass/comfy_mtb/releases) there is a bundle for linux and windows with prebuilt wheels (only the ones that require building from source), check [this issue (#1)](https://github.com/melMass/comfy_mtb/issues/1) for more info. ![image](https://github.com/melMass/comfy_mtb/assets/7041726/2934fa14-3725-427c-8b9e-2b4f60ba1b7b) -#### Google Colab + +
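+
+If you prefer to install that bundle by hand instead of through `install.py`, here is a rough sketch; it assumes the release archive was extracted to `ComfyUI/custom_nodes/comfy_mtb/wheels` and that you run it from the standalone root (the folder containing `python_embeded`):
+
+```bash
+# point pip at the prebuilt wheels first, then install the remaining requirements
+python_embeded/python.exe -m pip install --find-links=ComfyUI/custom_nodes/comfy_mtb/wheels -r ComfyUI/custom_nodes/comfy_mtb/requirements-wheels.txt
+python_embeded/python.exe -m pip install -r ComfyUI/custom_nodes/comfy_mtb/requirements.txt
+```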
+ +

Google Colab

Add a new code cell just after the **Run ComfyUI with localtunnel (Recommended Way)** header (before the code cell) ![preview of where to add it on colab](https://github.com/melMass/comfy_mtb/assets/7041726/35df2ef1-14f9-44cd-aa65-353829188cd7) @@ -43,29 +82,10 @@ If after running this, colab complains about needing to restart runtime, do it, > **Note**: -> If you don't need all models, remove the `-y` as collab actually supportd user input: ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06) +> If you don't need all models, remove the `-y` as colab actually supports user input: ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06) > **Preview** > ![image](https://github.com/melMass/comfy_mtb/assets/7041726/b5b2b2d9-f1e8-4c43-b1db-7dfc5e07be86) +
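+
+Whichever install method you used, once ComfyUI is running you can sanity-check the result through the status endpoint added in this version (assuming ComfyUI listens on its default `127.0.0.1:8188`):
+
+```bash
+# returns a JSON object with the registered mtb nodes and any that failed to load
+curl -s http://127.0.0.1:8188/mtb/status
+```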
- -### Models Download -Some nodes require extra models to be downloaded, you can interactively do it using the same python environment as above: -```bash -python scripts/download_models.py -``` - -then follow the prompt or just press enter to download every models. - -> **Note** -> You can use the following to download all models without prompt: - ```bash - python scripts/download_models.py -y - ``` - -### Web Extensions - -On first run the script [tries to symlink](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61) the [web extensions](https://github.com/melMass/comfy_mtb/tree/main/web) to your comfy `web/extensions` folder. In case it fails you can manually copy the mtb folder to `ComfyUI/web/extensions` it only provides a color widget for now shared by a few nodes: - -color widget preview diff --git a/README-CN.md b/README-CN.md new file mode 100644 index 0000000..106040c --- /dev/null +++ b/README-CN.md @@ -0,0 +1,102 @@ +## MTB Nodes + +Buy Me A Coffee + +欢迎使用 MTB Nodes 项目!这个代码库是开放的,您可以自由地探索和利用。它的主要目的是构建用于 [MLOPs](https://github.com/Bismuth-Consultancy-BV/MLOPs) 中的概念验证(POCs)。该项目中的许多节点都是受到现有社区贡献或内置功能的启发而创建的。 + +在继续之前,请注意与此项目中使用的某些库相关的许可证。例如,`deepbump` 库采用 [GPLv3](https://github.com/HugoTini/DeepBump/blob/master/LICENSE) 许可证。 + +- [MTB Nodes](#mtb-nodes) +- [安装](#安装) +- [节点列表](#节点列表) + - [bbox](#bbox) + - [colors](#colors) + - [人脸检测/交换](#人脸检测交换) + - [图像插值(动画)](#图像插值动画) + - [图像操作](#图像操作) + - [潜在变量工具](#潜在变量工具) + - [其他工具](#其他工具) + - [纹理](#纹理) +- [Comfy 资源](#comfy-资源) + + +## 安装 + +- 移至 [INSTALL-CN.md](./INSTALL-CN.md) + + +## 节点列表 + +### bbox +- `Bounding Box`: BBox 构造函数(自定义类型) +- `BBox From Mask`: 从遮罩中提取边界框 +- `Crop`: 根据边界框裁剪图像 +- `Uncrop`: 根据边界框还原图像 + +### colors +- `Colored Image`: 给定尺寸的纯色图像 +- `RGB to HSV`: - +- `HSV to RGB`: - +- `Color Correct`: 基本颜色校正工具 + + +### 人脸检测/交换 +- `Face Swap`: 使用 deepinsight/insightface 模型进行人脸交换(该节点在早期版本中称为 `Roop`,功能相同,`Roop` 只是使用这些模型的应用程序) + > **注意** + > 人脸索引允许您选择要替换的人脸,如下所示: + +- `Load Face Swap Model`: 加载 insightface 模型用于人脸交换 +- `Restore Face`: 使用 [GFPGan](https://github.com/TencentARC/GFPGAN) 还原人脸,与 `Face Swap` 配合使用效果很好,并支持 `bg_upscaler` 的 Comfy 原生放大器 + +### 图像插值(动画) +- `Load Film Model`: 加载 [FILM](https://github.com/google-research/frame-interpolation) 模型 +- `Film Interpolation`: 使用 [FILM](https://github.com/google-research/frame-interpolation) 处理输入帧 + +- `Export to Prores (experimental)`: 将输入帧导出为 ProRes 4444 mov 文件。这使用 ffmpeg stdin 发送原始的 NumPy 数组,与 `Film Interpolation` 一起使用,目前很简单,但可以进一步扩展。 + +### 图像操作 +- `Blur`: 使用高斯滤波器对图像进行模糊处理。 +- `Deglaze Image`: 从 [FN16](https://github.com/Fannovel16/FN16-ComfyUI-nodes/blob/main/DeglazeImage.py) 中提取 +- `Denoise`: 对输入图像进行降噪处理 +- `Image Compare`: 比较两个图像并返回差异图像 +- `Image Premultiply`: 使用掩码对图像进行预乘处理 +- `Image Remove Background Rembg`: 使用 [RemBG](https://github.com/danielgatis/rembg) 进行背景去除 + +- `Image Resize Factor`: 大部分提取自 [WAS Node Suite](https://github.com/WASasquatch/was-node-suite-comfyui),经过一些编辑(特别是支持多个图像)和较少的功能。 +- `Mask To Image`: 将遮罩(Alpha)转换为带有颜色和背景的 RGB 图像 +- `Save Image Grid`: 将输入批次中的所有图像保存为图像网格。 + +### 潜在变量工具 +- `Latent Lerp`: 两个潜在变量之间的线性插值(混合) + + +### 其他工具 +- `Concat Images`: 接受两个图像流,并将它们合并为其他 Comfy 管道支持的图像批次。 +- `Image Resize Factor`: **已弃用**,因为我后来发现了内 + +置的图像调整大小功能。 +- `Text To Image`: 使用字体将文本转换为图像的工具 +- `Styles Loader`: 加载 csv 文件并从行中填充下拉列表(类似于 A111) + +- `Smart Step`: 一个非常基本的节点,用于获取在 KSampler 高级中使用的步骤百分比 +- `Qr Code`: 基本的 QR Code 生成器 +- `Save Tensors`: 调试节点,将来可能会被删除 +- `Int to Number`: 用于 WASSuite 数字节点的补充 +- `Smart Step`: 使用百分比来控制 
`KAdvancedSampler` 的步骤(开始/停止) + +### 纹理 + +- `DeepBump`: 从单张图片生成法线图和高度图 + +## Comfy 资源 + +**指南**: +- [官方示例(英文)](https://comfyanonymous.github.io/ComfyUI_examples/) +- @BlenderNeko 的[ComfyUI 社区手册(英文)](https://blenderneko.github.io/ComfyUI-docs/) + +- @tjhayasaka 的[Tomoaki 个人 Wiki(日文)](https://comfyui.creamlab.net/guides/) + +**扩展和自定义节点**: +- @WASasquatch 的[Comfy 列表插件(英文)](https://github.com/WASasquatch/comfyui-plugins) + +- [CivitAI 上的 ComfyUI 标签(英文)](https://civitai.com/tag/comfyui) \ No newline at end of file diff --git a/README-JP.md b/README-JP.md new file mode 100644 index 0000000..a48b5ef --- /dev/null +++ b/README-JP.md @@ -0,0 +1,101 @@ +## MTB Nodes + +Buy Me A Coffee + +MTB Nodesプロジェクトへようこそ!このコードベースは、自由に探索し、利用することができます。主な目的は、[MLOPs](https://github.com/Bismuth-Consultancy-BV/MLOPs)の実装のための概念実証(POC)を構築することです。このプロジェクトの多くのノードは、既存のコミュニティの貢献や組み込みの機能に触発されています。 + +続行する前に、このプロジェクトで使用されている特定のライブラリに関連するライセンスに注意してください。たとえば、「deepbump」ライブラリは、[GPLv3](https://github.com/HugoTini/DeepBump/blob/master/LICENSE)の下でライセンスされています。 + +- [MTB Nodes](#mtb-nodes) +- [インストール](#インストール) +- [ノードリスト](#ノードリスト) + - [bbox](#bbox) + - [colors](#colors) + - [顔検出 / スワッピング](#顔検出--スワッピング) + - [画像補間(アニメーション)](#画像補間アニメーション) + - [画像操作](#画像操作) + - [潜在的なユーティリティ](#潜在的なユーティリティ) + - [その他のユーティリティ](#その他のユーティリティ) + - [テクスチャ](#テクスチャ) +- [Comfyリソース](#comfyリソース) + + +## インストール + +- [INSTALL-JP.md](./INSTALL-JP.md)に移動しました。 + + +## ノードリスト + +### bbox +- `Bounding Box`: BBoxコンストラクタ(カスタムタイプ) +- `BBox From Mask`: マスクからバウンディングボックスを抽出 +- `Crop`: BBoxから画像を切り抜く +- `Uncrop`: BBoxから画像を元に戻す + +### colors +- `Colored Image`: 指定されたサイズの一定の色の画像 +- `RGB to HSV`: - +- `HSV to RGB`: - +- `Color Correct`: 基本的なカラーコレクションツール + + +### 顔検出 / スワッピング +- `Face Swap`: deepinsight/insightfaceモデルを使用した顔の入れ替え(このノードは初期バージョンでは「Roop」と呼ばれていましたが、同じ機能を提供します。Roopは単にこれらのモデルを使用するアプリです) + > **注意** + > 顔のインデックスを使用して置き換える顔を選択できます。以下を参照してください: + +- `Load Face Swap Model`: 顔の交換のためのinsightfaceモデルを読み込む +- `Restore Face`: [GFPGan](https://github.com/TencentARC/GFPGAN)を使用して顔を復元し、`Face Swap`と組み合わせて使用すると非常に効果的であり、`bg_upscaler`のComfyネイティブアップスケーラーもサポートしています。 + +### 画像補間(アニメーション) +- `Load Film Model`: [FILM](https://github.com/google-research/frame-interpolation)モデルを読み込む +- `Film Interpolation`: [FILM](https://github.com/google-research/frame-interpolation)を使用して入力フレームを処理する + +- `Export to Prores (experimental)`: 入力フレームをProRes 4444 movファイルにエクスポートします。これは現在は単純なものですが、`Film Interpolation`と組み合わせて使用するためのffmpegのstdinを使用して生のNumPy配列を送信するもので、拡張することもできます。 + +### 画像操作 +- `Blur`: ガウスフィルタを使用して画像をぼかす +- `Deglaze Image`: [FN16](https://github.com/Fannovel16/FN16-ComfyUI-nodes/blob/main/DeglazeImage.py)から取得 +- `Denoise`: 入力画像のノイズを除去する +- `Image Compare`: 2つの画像を比較し、差分画像を返す +- `Image Premultiply`: 画像をマスクで乗算 +- `Image Remove Background Rembg`: [RemBG](https://github.com/danielgatis/rembg)を使用した背景除去 + +- `Image Resize Factor`: [WAS Node Suite](https://github.com/WASasquatch/was-node-suite-comfyui)から抽出され、いくつかの編集(特に複数の画像のサポート)と機能の削減が行われました。 +- `Mask To Image`: マスク(アルファ)をカラーと背景を持つRGBイメージに変換します。 +- `Save Image Grid`: 入力バッチのすべての画像を画像グリッドとして保存します。 + +### 潜在的なユーティリティ +- `Latent Lerp`: 2つの潜在的なベクトルの間の線形補間(ブレンド) + +### その他のユーティリティ +- `Concat Images`: 2つの画像ストリームを取り、他のComfyパイプラインでサポートされている画像のバッチとしてマージします。 +- `Image Resize Factor`: **非推奨**。組み込みの画像リサイズ機能を発見したため、削除される予定です。 +- `Text To Image`: フォントを使用してテキストを画像に変換するためのユーティリティ +- `Styles Loader`: csvファイルをロードし、行からドロップダウンを作成します(A111のようなもの) + +- `Smart Step`: KSamplerの高度な使用に使用するステップパーセントを取得する非常に基本的なノード +- `Qr Code`: 基本的なQRコード生成器 +- `Save Tensors`: 将来的に削除される可能性のあるデバッグノード +- `Int to 
Number`: WASSuiteの数値ノードの補完 +- `Smart Step`: `KAdvancedSampler`のステップ(開始/停止)を制御するための非常に基本的なツールで、パーセンテージを使用します。 + +### テクスチャ + +- `DeepBump`: 1枚の画像から法線マップと高さマップを生成します。 + +## Comfyリソース + +**ガイド**: +- [公式の例(英語)](https://comfyanonymous.github.io/ComfyUI_examples/) +- @BlenderNekoによる[ComfyUIコミュニティマニュアル(英語)](https://blenderneko.github.io/ComfyUI-docs/) + +- @tjhayasakaによる[Tomoakiの個人Wiki(日本語)](https://comfyui.creamlab.net/guides/) + +**拡張機能とカスタムノード**: +- @WASasquatchによる[Comfyリスト用のプラグイン(英語)](https://github.com/WASasquatch/comfyui-plugins) + +- [CivitAIのComfyUIタグ(英語)](https://civitai.com/tag/comfyui) \ No newline at end of file diff --git a/__init__.py b/__init__.py index 977694d..2423e2b 100644 --- a/__init__.py +++ b/__init__.py @@ -1,17 +1,59 @@ +import os + +os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" + import traceback from .log import log, blue_text, cyan_text, get_summary, get_label from .utils import here import importlib import os +import ast +import json NODE_CLASS_MAPPINGS = {} NODE_DISPLAY_NAME_MAPPINGS = {} NODE_CLASS_MAPPINGS_DEBUG = {} +__version__ = "0.1.0" + + +def extract_nodes_from_source(filename): + source_code = "" + + with open(filename, "r") as file: + source_code = file.read() + + nodes = [] + + try: + parsed = ast.parse(source_code) + for node in ast.walk(parsed): + if isinstance(node, ast.Assign) and len(node.targets) == 1: + target = node.targets[0] + if isinstance(target, ast.Name) and target.id == "__nodes__": + value = ast.get_source_segment(source_code, node.value) + node_value = ast.parse(value).body[0].value + if isinstance(node_value, ast.List) or isinstance( + node_value, ast.Tuple + ): + for element in node_value.elts: + if isinstance(element, ast.Name): + print(element.id) + nodes.append(element.id) + + break + except SyntaxError: + log.error("Failed to parse") + pass # File couldn't be parsed + + return nodes + def load_nodes(): errors = [] nodes = [] + nodes_failed = [] + for filename in (here / "nodes").iterdir(): if filename.suffix == ".py": module_name = filename.stem @@ -22,17 +64,20 @@ def load_nodes(): ) _nodes = getattr(module, "__nodes__") nodes.extend(_nodes) - log.debug(f"Imported {module_name} nodes") except AttributeError: pass # wip nodes except Exception: error_message = traceback.format_exc().splitlines()[-1] - errors.append(f"Failed to import {module_name} because {error_message}") + errors.append( + f"Failed to import module {module_name} because {error_message}" + ) + # Read __nodes__ variable from the source file + nodes_failed.extend(extract_nodes_from_source(filename)) if errors: - log.error( + log.info( f"Some nodes failed to load:\n\t" + "\n\t".join(errors) + "\n\n" @@ -40,7 +85,7 @@ def load_nodes(): + "If you think this is a bug, please report it on the github page (https://github.com/melMass/comfy_mtb/issues)" ) - return nodes + return (nodes, nodes_failed) # - REGISTER WEB EXTENSIONS @@ -49,20 +94,39 @@ def load_nodes(): if web_mtb.exists(): log.debug(f"Web extensions folder found at {web_mtb}") + if not os.path.islink(web_mtb.as_posix()): + log.warn( + f"Web extensions folder at {web_mtb} is not a symlink, if updating please delete it before" + ) + + elif web_extensions_root.exists(): + web_tgt = here / "web" try: - os.symlink((here / "web"), web_mtb.as_posix()) + os.symlink(web_tgt.as_posix(), web_mtb.as_posix()) + except OSError: + log.warn(f"Failed to create symlink to {web_mtb}, trying to copy it") + try: + import shutil + + shutil.copytree(web_tgt, web_mtb) + log.info(f"Successfully copied {web_tgt} to {web_mtb}") + except 
Exception: + log.warn( + f"Failed to symlink and copy {web_tgt} to {web_mtb}. Please copy the folder manually." + ) + except Exception: # OSError - log.error( + log.warn( f"Failed to create symlink to {web_mtb}. Please copy the folder manually." ) else: - log.error( + log.warn( f"Comfy root probably not found automatically, please copy the folder {web_mtb} manually in the web/extensions folder of ComfyUI" ) # - REGISTER NODES -nodes = load_nodes() +nodes, failed = load_nodes() for node_class in nodes: class_name = node_class.__name__ node_label = f"{get_label(class_name)} (mtb)" @@ -72,6 +136,18 @@ def load_nodes(): # TODO: I removed this, I find it more convenient to write without spaces, but it breaks every of my workflows # TODO (cont): and until I find a way to automate the conversion, I'll leave it like this + if os.environ.get("MTB_EXPORT"): + with open(here / "node_list.json", "w") as f: + f.write( + json.dumps( + { + k: NODE_CLASS_MAPPINGS_DEBUG[k] + for k in sorted(NODE_CLASS_MAPPINGS_DEBUG.keys()) + }, + indent=4, + ) + ) + log.info( f"Loaded the following nodes:\n\t" + "\n\t".join( @@ -79,3 +155,98 @@ def load_nodes(): for k, doc in NODE_CLASS_MAPPINGS_DEBUG.items() ) ) + +# - ENDPOINT +from server import PromptServer +from .log import mklog, log +from aiohttp import web +from importlib import reload +import logging + +endlog = mklog("endpoint") + + +@PromptServer.instance.routes.get("/mtb/status") +async def get_full_library(request): + files = [] + endlog.debug("Getting status") + return web.json_response( + { + "registered": NODE_CLASS_MAPPINGS_DEBUG, + "failed": failed, + } + ) + + +@PromptServer.instance.routes.post("/mtb/debug") +async def set_debug(request): + json_data = await request.json() + enabled = json_data.get("enabled") + if enabled: + os.environ["MTB_DEBUG"] = "true" + log.setLevel(logging.DEBUG) + log.debug("Debug mode set") + + else: + if "MTB_DEBUG" in os.environ: + # del os.environ["MTB_DEBUG"] + os.environ.pop("MTB_DEBUG") + log.setLevel(logging.INFO) + + return web.json_response({"message": f"Debug mode {'set' if enabled else 'unset'}"}) + + +@PromptServer.instance.routes.get("/mtb") +async def get_home(request): + from . import endpoint + + reload(endpoint) + # Check if the request prefers HTML content + if "text/html" in request.headers.get("Accept", ""): + # # Return an HTML page + html_response = f""" + + """ + return web.Response( + text=endpoint.render_base_template("MTB", html_response), + content_type="text/html", + ) + + # Return JSON for other requests + return web.json_response({"message": "Welcome to MTB!"}) + + +@PromptServer.instance.routes.get("/mtb/debug") +async def get_debug(request): + from . import endpoint + + reload(endpoint) + enabled = False + if "MTB_DEBUG" in os.environ: + enabled = True + # Check if the request prefers HTML content + if "text/html" in request.headers.get("Accept", ""): + # # Return an HTML page + html_response = f""" +

MTB Debug Status: {'Enabled' if enabled else 'Disabled'}

+ """ + return web.Response( + text=endpoint.render_base_template("Debug", html_response), + content_type="text/html", + ) + + # Return JSON for other requests + return web.json_response({"enabled": enabled}) + + +# - WAS Dictionary +MANIFEST = { + "name": "MTB Nodes", # The title that will be displayed on Node Class menu,. and Node Class view + "version": (0, 1, 0), # Version of the custom_node or sub module + "author": "Mel Massadian", # Author or organization of the custom_node or sub module + "project": "https://github.com/melMass/comfy_mtb", # The address that the `name` value will link to on Node Class Views + "description": "Set of nodes that enhance your animation workflow and provide a range of useful tools including features such as manipulating bounding boxes, perform color corrections, swap faces in images, interpolate frames for smooth animation, export to ProRes format, apply various image operations, work with latent spaces, generate QR codes, and create normal and height maps for textures.", +} diff --git a/endpoint.py b/endpoint.py new file mode 100644 index 0000000..1da1315 --- /dev/null +++ b/endpoint.py @@ -0,0 +1,39 @@ +from .utils import here + + +def render_base_template(title, content): + css_content = "" + css_path = here / "html" / "style.css" + if css_path: + with open(css_path, "r") as css_file: + css_content = css_file.read() + + github_icon_svg = """""" + return f""" + + + + {title} + + + +
+ Comfy MTB Logo + Comfy MTB + + {github_icon_svg} + +
+ +
+ {content} +
+ + + + + """ diff --git a/examples/01-faceswap.json b/examples/01-faceswap.json new file mode 100644 index 0000000..605697a --- /dev/null +++ b/examples/01-faceswap.json @@ -0,0 +1,1012 @@ +{ + "last_node_id": 76, + "last_link_id": 161, + "nodes": [ + { + "id": 59, + "type": "Reroute", + "pos": [ + -150.35178124999982, + 644.4360633544919 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 124 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 139 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 56, + "type": "Reroute", + "pos": [ + -1580.8297949218763, + 644.7740239257807 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 117 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 124 + ] + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 57, + "type": "Reroute", + "pos": [ + -673.8297949218747, + -185.22597607421872 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 135 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 120 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 65, + "type": "Reroute", + "pos": [ + -1512.8297949218763, + -181.22597607421872 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 134 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 135 + ] + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -834.8297949218747, + 206.77402392578134 + ], + "size": [ + 210, + 54 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 142 + }, + { + "name": "text", + "type": "STRING", + "link": 87, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "worst quality, hands, embedding:EasyNegative," + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + -913.8297949218747, + 326.77402392578125 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 512, + 1 + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -1040, + -2 + ], + "size": [ + 422.84503173828125, + 164.31304931640625 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 141 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "Closeup portrait of an old bearded Caucasian man smiling, (NYC 1995), trench coat, golden ring, brown eyes" + ] + }, + { + "id": 16, 
+ "type": "CheckpointLoaderSimple", + "pos": [ + -2001, + 193 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 134 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 141, + 142 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 117 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "revAnimated_v122.safetensors" + ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + -483, + -21 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 120 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 138 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 542821171532982, + "fixed", + 45, + 8, + "dpmpp_sde", + "simple", + 1 + ] + }, + { + "id": 66, + "type": "VAEDecodeTiled", + "pos": [ + 205, + -28 + ], + "size": [ + 210, + 46 + ], + "flags": { + "collapsed": false + }, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 138 + }, + { + "name": "vae", + "type": "VAE", + "link": 139, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 153, + 161 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecodeTiled" + } + }, + { + "id": 25, + "type": "PreviewImage", + "pos": [ + 2338.9229387812507, + -632.8275743593749 + ], + "size": [ + 726.28564453125, + 475.3432312011719 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 150 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 75, + "type": "PreviewImage", + "pos": [ + 1573.5946044921875, + -629.80322265625 + ], + "size": [ + 691.7459716796875, + 479.6098327636719 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 156 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 70, + "type": "Restore Face (mtb)", + "pos": [ + 1899, + -51 + ], + "size": [ + 315, + 150 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 149 + }, + { + "name": "model", + "type": "FACEENHANCE_MODEL", + "link": 151, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 150 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Restore Face (mtb)" + }, + "widgets_values": [ + "false", + "false", + 0.5, + "true" + ] + }, + { + "id": 67, + "type": "Face Swap (mtb)", + "pos": [ + 1127, + -26 + ], + "size": [ + 315, + 122 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 161 + }, + { + "name": "reference", + "type": "IMAGE", + "link": 160, + "slot_index": 1 + }, + { + "name": "faceswap_model", + "type": "FACESWAP_MODEL", + "link": 147, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 149, + 156 + ], + "shape": 
3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Face Swap (mtb)" + }, + "widgets_values": [ + "0", + "false" + ] + }, + { + "id": 71, + "type": "Load Face Enhance Model (mtb)", + "pos": [ + 1476, + 240 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "bg_upsampler", + "type": "UPSCALE_MODEL", + "link": 152, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "model", + "type": "FACEENHANCE_MODEL", + "links": [ + 151 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Load Face Enhance Model (mtb)" + }, + "widgets_values": [ + "GFPGANv1.4.pth", + 2 + ] + }, + { + "id": 72, + "type": "UpscaleModelLoader", + "pos": [ + 1110, + 241 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "UPSCALE_MODEL", + "type": "UPSCALE_MODEL", + "links": [ + 152 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "UpscaleModelLoader" + }, + "widgets_values": [ + "4x-UltraSharp.pth" + ] + }, + { + "id": 73, + "type": "PreviewImage", + "pos": [ + 760, + -630 + ], + "size": [ + 671.1859741210938, + 483.1548156738281 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 153 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 76, + "type": "Load Image From Url (mtb)", + "pos": [ + 161, + 231 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 160 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Load Image From Url (mtb)" + }, + "widgets_values": [ + "https://lucasmuseum.org/assets/general/Lucas_Headshot_Color_web.jpg" + ] + }, + { + "id": 69, + "type": "Load Face Swap Model (mtb)", + "pos": [ + 172, + 503 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "FACESWAP_MODEL", + "type": "FACESWAP_MODEL", + "links": [ + 147 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Load Face Swap Model (mtb)" + }, + "widgets_values": [ + "inswapper_128.onnx" + ] + }, + { + "id": 20, + "type": "Styles Loader (mtb)", + "pos": [ + -1341, + 276 + ], + "size": [ + 315, + 78 + ], + "flags": { + "collapsed": false + }, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "positive", + "type": "STRING", + "links": [], + "shape": 3 + }, + { + "name": "negative", + "type": "STRING", + "links": [ + 87 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Styles Loader (mtb)" + }, + "widgets_values": [ + "❌Low Token" + ] + } + ], + "links": [ + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 87, + 20, + 1, + 7, + 1, + "STRING" + ], + [ + 117, + 16, + 2, + 56, + 0, + "*" + ], + [ + 120, + 57, + 0, + 3, + 0, + "MODEL" + ], + [ + 124, + 56, + 0, + 59, + 0, + "*" + ], + [ + 134, + 16, + 0, + 65, + 0, + "*" + ], + [ + 135, + 65, + 0, + 57, + 0, + "*" + ], + [ + 138, + 3, + 0, + 66, + 0, + "LATENT" + ], + [ + 139, + 59, + 0, + 66, + 1, + "VAE" + ], + [ + 141, + 16, + 1, + 6, + 0, + "CLIP" + ], + [ + 142, + 16, + 1, + 7, + 0, + "CLIP" + ], + [ + 147, + 69, + 0, + 67, + 2, + "FACESWAP_MODEL" + ], + [ + 149, + 67, + 0, + 70, + 0, + "IMAGE" + ], + [ + 150, + 70, + 0, + 25, + 0, + "IMAGE" + ], + [ + 151, + 71, + 0, + 70, + 1, + "FACEENHANCE_MODEL" + ], 
+ [ + 152, + 72, + 0, + 71, + 0, + "UPSCALE_MODEL" + ], + [ + 153, + 66, + 0, + 73, + 0, + "IMAGE" + ], + [ + 156, + 67, + 0, + 75, + 0, + "IMAGE" + ], + [ + 160, + 76, + 0, + 67, + 1, + "IMAGE" + ], + [ + 161, + 66, + 0, + 67, + 0, + "IMAGE" + ] + ], + "groups": [ + { + "title": "Txt2Img", + "bounding": [ + -2061, + -234, + 1932, + 973 + ], + "color": "#a1309b", + "locked": false + }, + { + "title": "Save Intermediate Image", + "bounding": [ + 147, + -152, + 303, + 213 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "SWAP & RESTORED", + "bounding": [ + 2305, + -741, + 789, + 638 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "SWAP", + "bounding": [ + 1520, + -743, + 774, + 642 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "SD OUTPUT", + "bounding": [ + 655, + -745, + 854, + 648 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/examples/02-film_interpolation.json b/examples/02-film_interpolation.json new file mode 100644 index 0000000..218de4f --- /dev/null +++ b/examples/02-film_interpolation.json @@ -0,0 +1,1349 @@ +{ + "last_node_id": 82, + "last_link_id": 165, + "nodes": [ + { + "id": 59, + "type": "Reroute", + "pos": [ + -670, + 980 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 124 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 139, + 154 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 56, + "type": "Reroute", + "pos": [ + -2050, + 980 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 117 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 124 + ] + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 65, + "type": "Reroute", + "pos": [ + -1990, + 160 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 134 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 135 + ] + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 57, + "type": "Reroute", + "pos": [ + -1200, + 150 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 135 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 120, + 143 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + -1410, + 660 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2, + 153 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 512, + 1 + ] + }, + { + "id": 67, + "type": "Text", + "pos": [ + -2280, + 250 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 142, + 164 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Text" + }, + "widgets_values": [ + "Photo of a Face of a man (looking down), rim lighting, tokyo 1987" 
+ ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + -1010, + 320 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 120 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + }, + { + "name": "seed", + "type": "INT", + "link": 148, + "widget": { + "name": "seed", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 138 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 542821171532978, + "fixed", + 45, + 8, + "euler_ancestral", + "simple", + 1 + ] + }, + { + "id": 72, + "type": "PrimitiveNode", + "pos": [ + -1290, + 880 + ], + "size": [ + 210, + 82 + ], + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 148, + 150 + ], + "widget": { + "name": "seed", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + }, + "slot_index": 0 + } + ], + "title": "seed", + "properties": {}, + "widgets_values": [ + 542821171532978, + "fixed" + ] + }, + { + "id": 79, + "type": "Load Film Model (mtb)", + "pos": [ + 720, + 590 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "FILM_MODEL", + "type": "FILM_MODEL", + "links": [ + 161 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Load Film Model (mtb)" + }, + "widgets_values": [ + "Style" + ] + }, + { + "id": 80, + "type": "PreviewImage", + "pos": [ + 1120, + 190 + ], + "size": [ + 210, + 246 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 163, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 71, + "type": "CLIPTextEncode", + "pos": [ + -610, + 400 + ], + "size": [ + 210, + 75.28300476074219 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 152, + "slot_index": 0 + }, + { + "name": "text", + "type": "STRING", + "link": 151, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 144 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "Face of a man (looking down), rim lighting, tokyo 1987" + ] + }, + { + "id": 19, + "type": "CLIPSetLastLayer", + "pos": [ + -1980, + 400 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 116 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 28, + 29, + 152 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPSetLastLayer" + }, + "widgets_values": [ + -2 + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -1320, + 330 + ], + "size": [ + 210, + 54 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 28 + }, + { + "name": "text", + "type": "STRING", + "link": 142, + "widget": { + "name": "text", + "config": [ + "STRING", + { + 
"multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "Face of a man (looking down), rim lighting, tokyo 1987" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -1320, + 430 + ], + "size": [ + 210, + 54 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 29 + }, + { + "name": "text", + "type": "STRING", + "link": 87, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6, + 145 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "worst quality, hands, embedding:EasyNegative," + ] + }, + { + "id": 20, + "type": "Styles Loader (mtb)", + "pos": [ + -1650, + 500 + ], + "size": [ + 285.32098388671875, + 78 + ], + "flags": { + "collapsed": false + }, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "positive", + "type": "STRING", + "links": [], + "shape": 3 + }, + { + "name": "negative", + "type": "STRING", + "links": [ + 87 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Styles Loader (mtb)" + }, + "widgets_values": [ + "❌Low Token" + ] + }, + { + "id": 81, + "type": "Reroute", + "pos": [ + -1740, + -10 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 164 + } + ], + "outputs": [ + { + "name": "", + "type": "STRING", + "links": [ + 165 + ] + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 69, + "type": "String Replace (mtb)", + "pos": [ + -1000, + 0 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "string", + "type": "STRING", + "link": 165 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 151 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String Replace (mtb)" + }, + "widgets_values": [ + "(looking down)", + "(looking up)" + ] + }, + { + "id": 75, + "type": "VAEDecodeTiled", + "pos": [ + 220, + 620 + ], + "size": [ + 210, + 46 + ], + "flags": { + "collapsed": false + }, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 155, + "slot_index": 0 + }, + { + "name": "vae", + "type": "VAE", + "link": 154, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 158 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecodeTiled" + } + }, + { + "id": 78, + "type": "Film Interpolation (mtb)", + "pos": [ + 1140, + 520 + ], + "size": [ + 315, + 78 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 160, + "slot_index": 0 + }, + { + "name": "film_model", + "type": "FILM_MODEL", + "link": 161, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 162 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Film Interpolation (mtb)" + }, + "widgets_values": [ + 4 + ] + }, + { + "id": 70, + "type": "KSampler", + "pos": [ + -300, + 380 + ], + "size": [ + 315, + 474 + 
], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 143 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 144 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 145 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 153, + "slot_index": 3 + }, + { + "name": "seed", + "type": "INT", + "link": 150, + "widget": { + "name": "seed", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + }, + "slot_index": 4 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 155 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 542821171532978, + "fixed", + 45, + 8, + "euler_ancestral", + "simple", + 1 + ] + }, + { + "id": 16, + "type": "CheckpointLoaderSimple", + "pos": [ + -2470, + 530 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 134 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 116 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 117 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "revAnimated_v122.safetensors" + ] + }, + { + "id": 82, + "type": "Note", + "pos": [ + -398, + 25 + ], + "size": [ + 500.61550625887776, + 270.54209761186075 + ], + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "THIS LOOKS A BIT MESSY BUT WE BASICALLY USE THE SAME INPUTS (MODEL, SEED etc), ONLY STRING REPLACING THE PROMPT, LOOKING DOWN -> LOOKING UP" + }, + "widgets_values": [ + "THIS LOOKS A BIT MESSY BUT WE BASICALLY USE THE SAME INPUTS (MODEL, SEED etc), ONLY STRING REPLACING THE PROMPT, LOOKING DOWN -> LOOKING UP\n" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 66, + "type": "VAEDecodeTiled", + "pos": [ + 231, + 403 + ], + "size": [ + 210, + 46 + ], + "flags": { + "collapsed": false + }, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 138 + }, + { + "name": "vae", + "type": "VAE", + "link": 139, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 157 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecodeTiled" + } + }, + { + "id": 77, + "type": "Concat Images (mtb)", + "pos": [ + 661, + 402 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "imageA", + "type": "IMAGE", + "link": 157 + }, + { + "name": "imageB", + "type": "IMAGE", + "link": 158 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 160, + 163 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Concat Images (mtb)" + } + }, + { + "id": 76, + "type": "PreviewImage", + "pos": [ + 1517, + 207 + ], + "size": [ + 511.33429483925056, + 675.8856963258129 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 162 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + } + ], + "links": [ + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 28, + 19, + 0, + 6, + 0, + "CLIP" + ], + [ + 29, + 19, + 0, + 7, + 0, + "CLIP" + ], + [ + 
87, + 20, + 1, + 7, + 1, + "STRING" + ], + [ + 116, + 16, + 1, + 19, + 0, + "CLIP" + ], + [ + 117, + 16, + 2, + 56, + 0, + "*" + ], + [ + 120, + 57, + 0, + 3, + 0, + "MODEL" + ], + [ + 124, + 56, + 0, + 59, + 0, + "*" + ], + [ + 134, + 16, + 0, + 65, + 0, + "*" + ], + [ + 135, + 65, + 0, + 57, + 0, + "*" + ], + [ + 138, + 3, + 0, + 66, + 0, + "LATENT" + ], + [ + 139, + 59, + 0, + 66, + 1, + "VAE" + ], + [ + 142, + 67, + 0, + 6, + 1, + "STRING" + ], + [ + 143, + 57, + 0, + 70, + 0, + "MODEL" + ], + [ + 144, + 71, + 0, + 70, + 1, + "CONDITIONING" + ], + [ + 145, + 7, + 0, + 70, + 2, + "CONDITIONING" + ], + [ + 148, + 72, + 0, + 3, + 4, + "INT" + ], + [ + 150, + 72, + 0, + 70, + 4, + "INT" + ], + [ + 151, + 69, + 0, + 71, + 1, + "STRING" + ], + [ + 152, + 19, + 0, + 71, + 0, + "CLIP" + ], + [ + 153, + 5, + 0, + 70, + 3, + "LATENT" + ], + [ + 154, + 59, + 0, + 75, + 1, + "VAE" + ], + [ + 155, + 70, + 0, + 75, + 0, + "LATENT" + ], + [ + 157, + 66, + 0, + 77, + 0, + "IMAGE" + ], + [ + 158, + 75, + 0, + 77, + 1, + "IMAGE" + ], + [ + 160, + 77, + 0, + 78, + 0, + "IMAGE" + ], + [ + 161, + 79, + 0, + 78, + 1, + "FILM_MODEL" + ], + [ + 162, + 78, + 0, + 76, + 0, + "IMAGE" + ], + [ + 163, + 77, + 0, + 80, + 0, + "IMAGE" + ], + [ + 164, + 67, + 0, + 81, + 0, + "*" + ], + [ + 165, + 81, + 0, + 69, + 0, + "STRING" + ] + ], + "groups": [ + { + "title": "GENERATE IMAGES", + "bounding": [ + -2477, + -219, + 3018, + 1350 + ], + "color": "#a1309b", + "locked": false + }, + { + "title": "FILM INTERPOLATION", + "bounding": [ + 559, + 66, + 1264, + 832 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..23bf2e8 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,12 @@ +# Examples +All the examples use the [RevAnimated model 1.22](https://civitai.com/models/7371?modelVersionId=46846) +## 01 Faceswap + +This example showcase the `Face Swap` & `Restore Face` nodes to replace the character with Georges Lucas's face. +The face reference image is using the `Load Image From Url` node to avoid bundling input images. + + + +## 02 FILM interpolation +This example showcase the FILM interpolation implementation. Here we do text replacement on the condition of two distinct images sharing the same model, input latent & seed to get relatively close images. 
+ diff --git a/html/style.css b/html/style.css new file mode 100644 index 0000000..9c10a3e --- /dev/null +++ b/html/style.css @@ -0,0 +1,52 @@ +html { + height: 100%; + margin: 0; + padding: 0; + background-color: rgb(33, 33, 33); + color: whitesmoke; +} + +body { + margin: 0; + padding: 0; + font-family: monospace; + height: 100%; + background-color: rgb(33, 33, 33); + +} + +.title { + font-size: 2.5em; + font-weight: 700; + margin: 1em; +} + +header { + display: flex; + align-items: center; + vertical-align: middle; + justify-content: space-between; + background-color: rgb(12, 12, 12); + padding: 1em; + margin: 0; +} + +main { + display: flex; + align-items: center; + vertical-align: middle; + justify-content: center; + padding: 1em; + margin: 0; + height: 80%; +} + +.flex-container { + display: flex; + flex-direction: column; +} + +.menu { + font-size: 3em; + text-align: center; +} \ No newline at end of file diff --git a/install.py b/install.py new file mode 100644 index 0000000..06aefcd --- /dev/null +++ b/install.py @@ -0,0 +1,472 @@ +import requests +import os +import ast +import re +import argparse +import sys +import subprocess +from importlib import import_module +import platform +from pathlib import Path +import sys +import zipfile +import shutil +import stat + + +here = Path(__file__).parent +executable = sys.executable + +# - detect mode +mode = None +if os.environ.get("COLAB_GPU"): + mode = "colab" +elif "python_embeded" in executable: + mode = "embeded" +elif ".venv" in executable: + mode = "venv" + + +if mode == None: + mode = "unknown" + +# region ansi +# ANSI escape sequences for text styling +ANSI_FORMATS = { + "reset": "\033[0m", + "bold": "\033[1m", + "dim": "\033[2m", + "italic": "\033[3m", + "underline": "\033[4m", + "blink": "\033[5m", + "reverse": "\033[7m", + "strike": "\033[9m", +} + +ANSI_COLORS = { + "black": "\033[30m", + "red": "\033[31m", + "green": "\033[32m", + "yellow": "\033[33m", + "blue": "\033[34m", + "magenta": "\033[35m", + "cyan": "\033[36m", + "white": "\033[37m", + "bright_black": "\033[30;1m", + "bright_red": "\033[31;1m", + "bright_green": "\033[32;1m", + "bright_yellow": "\033[33;1m", + "bright_blue": "\033[34;1m", + "bright_magenta": "\033[35;1m", + "bright_cyan": "\033[36;1m", + "bright_white": "\033[37;1m", + "bg_black": "\033[40m", + "bg_red": "\033[41m", + "bg_green": "\033[42m", + "bg_yellow": "\033[43m", + "bg_blue": "\033[44m", + "bg_magenta": "\033[45m", + "bg_cyan": "\033[46m", + "bg_white": "\033[47m", + "bg_bright_black": "\033[40;1m", + "bg_bright_red": "\033[41;1m", + "bg_bright_green": "\033[42;1m", + "bg_bright_yellow": "\033[43;1m", + "bg_bright_blue": "\033[44;1m", + "bg_bright_magenta": "\033[45;1m", + "bg_bright_cyan": "\033[46;1m", + "bg_bright_white": "\033[47;1m", +} + + +def apply_format(text, *formats): + """Apply ANSI escape sequences for the specified formats to the given text.""" + formatted_text = text + for format in formats: + formatted_text = f"{ANSI_FORMATS.get(format, '')}{formatted_text}{ANSI_FORMATS.get('reset', '')}" + return formatted_text + + +def apply_color(text, color=None, background=None): + """Apply ANSI escape sequences for the specified color and background to the given text.""" + formatted_text = text + if color: + formatted_text = f"{ANSI_COLORS.get(color, '')}{formatted_text}{ANSI_FORMATS.get('reset', '')}" + if background: + formatted_text = f"{ANSI_COLORS.get(background, '')}{formatted_text}{ANSI_FORMATS.get('reset', '')}" + return formatted_text + + +def print_formatted(text, *formats, 
color=None, background=None, **kwargs): + """Print the given text with the specified formats, color, and background.""" + formatted_text = apply_format(text, *formats) + formatted_text = apply_color(formatted_text, color, background) + file = kwargs.get("file", sys.stdout) + print( + apply_color(apply_format("[mtb install] ", "bold"), color="yellow"), + formatted_text, + file=file, + ) + + +# endregion + +try: + import requirements +except ImportError: + print_formatted("Installing requirements-parser...", "italic", color="yellow") + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "requirements-parser"] + ) + import requirements + + print_formatted("Done.", "italic", color="green") + +try: + from tqdm import tqdm +except ImportError: + print_formatted("Installing tqdm...", "italic", color="yellow") + subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "tqdm"]) + from tqdm import tqdm +import importlib + + +pip_map = { + "onnxruntime-gpu": "onnxruntime", + "opencv-contrib": "cv2", + "tb-nightly": "tensorboard", + "protobuf": "google.protobuf", + # Add more mappings as needed +} + + +def is_pipe(): + try: + mode = os.fstat(0).st_mode + return ( + stat.S_ISFIFO(mode) + or stat.S_ISREG(mode) + or stat.S_ISBLK(mode) + or stat.S_ISSOCK(mode) + ) + except OSError: + return False + + +# Get the version from __init__.py +def get_local_version(): + init_file = os.path.join(os.path.dirname(__file__), "__init__.py") + if os.path.isfile(init_file): + with open(init_file, "r") as f: + tree = ast.parse(f.read()) + for node in ast.walk(tree): + if isinstance(node, ast.Assign): + for target in node.targets: + if ( + isinstance(target, ast.Name) + and target.id == "__version__" + and isinstance(node.value, ast.Str) + ): + return node.value.s + return None + + +def download_file(url, file_name): + with requests.get(url, stream=True) as response: + response.raise_for_status() + total_size = int(response.headers.get("content-length", 0)) + with open(file_name, "wb") as file, tqdm( + desc=file_name.stem, + total=total_size, + unit="B", + unit_scale=True, + unit_divisor=1024, + ) as progress_bar: + for chunk in response.iter_content(chunk_size=8192): + file.write(chunk) + progress_bar.update(len(chunk)) + + +def get_requirements(path: Path): + with open(path.resolve(), "r") as requirements_file: + requirements_txt = requirements_file.read() + + try: + parsed_requirements = requirements.parse(requirements_txt) + except AttributeError: + print_formatted( + f"Failed to parse {path}. 
Please make sure the file is correctly formatted.", + "bold", + color="red", + ) + + return + + return parsed_requirements + + +def try_import(requirement): + dependency = requirement.name.strip() + import_name = pip_map.get(dependency, dependency) + installed = False + + pip_name = dependency + if specs := requirement.specs: + pip_name += "".join(specs[0]) + + try: + import_module(import_name) + print_formatted( + f"Package {pip_name} already installed (import name: '{import_name}').", + "bold", + color="green", + ) + installed = True + except ImportError: + pass + + return (installed, pip_name, import_name) + + +def import_or_install(requirement, dry=False): + installed, pip_name, import_name = try_import(requirement) + + if not installed: + print_formatted(f"Installing package {pip_name}...", "italic", color="yellow") + if dry: + print_formatted( + f"Dry-run: Package {pip_name} would be installed (import name: '{import_name}').", + color="yellow", + ) + else: + try: + subprocess.check_call( + [sys.executable, "-m", "pip", "install", pip_name] + ) + print_formatted( + f"Package {pip_name} installed successfully using pip package name (import name: '{import_name}')", + "bold", + color="green", + ) + except subprocess.CalledProcessError as e: + print_formatted( + f"Failed to install package {pip_name} using pip package name (import name: '{import_name}'). Error: {str(e)}", + "bold", + color="red", + ) + + +# Install dependencies from requirements.txt +def install_dependencies(dry=False): + parsed_requirements = get_requirements(here / "requirements.txt") + if not parsed_requirements: + return + print_formatted( + "Installing dependencies from requirements.txt...", "italic", color="yellow" + ) + + for requirement in parsed_requirements: + import_or_install(requirement, dry=dry) + + if mode == "venv": + parsed_requirements = get_requirements(here / "requirements-wheels.txt") + if not parsed_requirements: + return + for requirement in parsed_requirements: + import_or_install(requirement, dry=dry) + + +if __name__ == "__main__": + full = False + if is_pipe(): + print_formatted("Pipe detected, full install...", color="green") + # we clone our repo + url = "https://github.com/melmass/comfy_mtb.git" + clone_dir = here / "custom_nodes" / "comfy_mtb" + if not clone_dir.exists(): + clone_dir.parent.mkdir(parents=True, exist_ok=True) + print_formatted(f"Cloning {url} to {clone_dir}", "italic", color="yellow") + subprocess.check_call(["git", "clone", "--recursive", url, clone_dir]) + + # os.chdir(clone_dir) + here = clone_dir + full = True + + if len(sys.argv) == 1: + print_formatted( + "No arguments provided, doing a full install/update...", + "italic", + color="yellow", + ) + + full = True + + # Parse command-line arguments + parser = argparse.ArgumentParser() + parser.add_argument( + "--wheels", "-w", action="store_true", help="Install wheel dependencies" + ) + parser.add_argument( + "--requirements", "-r", action="store_true", help="Install requirements.txt" + ) + parser.add_argument( + "--dry", + action="store_true", + help="Print what will happen without doing it (still making requests to the GH Api)", + ) + + # parser.add_argument( + # "--version", + # default=get_local_version(), + # help="Version to check against the GitHub API", + # ) + + args = parser.parse_args() + + wheels_directory = here / "wheels" + print_formatted(f"Detected environment: {apply_color(mode,'cyan')}") + + # Install dependencies from requirements.txt + # if args.requirements or mode == "venv": + 
install_dependencies(dry=args.dry) + + if (not args.wheels and mode not in ["colab", "embeded"]) and not full: + print_formatted( + "Skipping wheel installation. Use --wheels to install wheel dependencies. (only needed for Comfy embed)", + "italic", + color="yellow", + ) + sys.exit() + + if mode in ["colab", "embeded"]: + print_formatted( + f"Downloading and installing release wheels since we are in a Comfy {apply_color(mode,'cyan')} environment", + ) + if full: + print_formatted( + f"Downloading and installing release wheels since no arguments where provided" + ) + + # - Check the env before proceeding. + missing_wheels = False + parsed_requirements = get_requirements(here / "requirements-wheels.txt") + if parsed_requirements: + for requirement in parsed_requirements: + installed, pip_name, import_name = try_import(requirement) + if not installed: + missing_wheels = True + break + + if not missing_wheels: + print_formatted( + f"All required wheels are already installed.", "italic", color="green" + ) + sys.exit() + + # Fetch the JSON data from the GitHub API URL + owner = "melmass" + repo = "comfy_mtb" + # version = args.version + current_platform = platform.system().lower() + + # Get the tag version from the GitHub API + tag_url = f"https://api.github.com/repos/{owner}/{repo}/releases/latest" + response = requests.get(tag_url) + if response.status_code == 404: + # print_formatted( + # f"Tag version '{apply_color(version,'cyan')}' not found for {owner}/{repo} repository." + # ) + print_formatted("Error retrieving the release assets.", color="red") + sys.exit() + + tag_data = response.json() + tag_name = tag_data["name"] + + # # Compare the local and tag versions + # if version and tag_name: + # if re.match(r"v?(\d+(\.\d+)+)", version) and re.match( + # r"v?(\d+(\.\d+)+)", tag_name + # ): + # version_parts = [int(part) for part in version.lstrip("v").split(".")] + # tag_version_parts = [int(part) for part in tag_name.lstrip("v").split(".")] + + # if version_parts > tag_version_parts: + # print_formatted( + # f"Local version ({version}) is greater than the release version ({tag_name}).", + # "bold", + # "yellow", + # ) + # sys.exit() + + # Download the assets for the given version + matching_assets = [ + asset for asset in tag_data["assets"] if current_platform in asset["name"] + ] + if not matching_assets: + print_formatted( + f"Unsupported operating system: {current_platform}", color="yellow" + ) + + wheels_directory.mkdir(exist_ok=True) + + for asset in matching_assets: + asset_name = asset["name"] + asset_download_url = asset["browser_download_url"] + print_formatted(f"Downloading asset: {asset_name}", color="yellow") + asset_dest = wheels_directory / asset_name + download_file(asset_download_url, asset_dest) + + # - Unzip to wheels dir + whl_files = [] + with zipfile.ZipFile(asset_dest, "r") as zip_ref: + for item in tqdm(zip_ref.namelist(), desc="Extracting", unit="file"): + if item.endswith(".whl"): + item_basename = os.path.basename(item) + target_path = wheels_directory / item_basename + with zip_ref.open(item) as source, open( + target_path, "wb" + ) as target: + whl_files.append(target_path) + shutil.copyfileobj(source, target) + + print_formatted( + f"Wheels extracted for {current_platform} to the '{wheels_directory}' directory.", + "bold", + color="green", + ) + + if whl_files: + for whl_file in tqdm(whl_files, desc="Installing", unit="package"): + whl_path = wheels_directory / whl_file + + # check if installed + try: + whl_dep = whl_path.name.split("-")[0] + import_name = 
pip_map.get(whl_dep, whl_dep) + import_module(import_name) + tqdm.write( + f"Package {import_name} already installed, skipping wheel installation.", + ) + continue + except ImportError: + if args.dry: + tqdm.write( + f"Dry-run: Package {whl_path.name} would be installed.", + ) + continue + + tqdm.write("Installing wheel: " + whl_path.name) + + subprocess.check_call( + [ + sys.executable, + "-m", + "pip", + "install", + whl_path.resolve().as_posix(), + ] + ) + + print_formatted("Wheels installation completed.", color="green") + else: + print_formatted("No .whl files found. Nothing to install.", color="yellow") diff --git a/interpolate_frames.py b/interpolate_frames.py new file mode 100644 index 0000000..6c235f0 --- /dev/null +++ b/interpolate_frames.py @@ -0,0 +1,142 @@ +import glob +from pathlib import Path +import uuid +import sys +from typing import List + +sys.path.append((Path(__file__).parent / "extern").as_posix()) + + +import argparse +from rich_argparse import RichHelpFormatter +from rich.console import Console +from rich.progress import Progress + +import numpy as np +import subprocess + + +def write_prores_444_video(output_file, frames: List[np.ndarray], fps): + # Convert float images to the range of 0-65535 (12-bit color depth) + frames = [(frame * 65535).clip(0, 65535).astype(np.uint16) for frame in frames] + + height, width, _ = frames[0].shape + + # Prepare the FFmpeg command + command = [ + "ffmpeg", + "-y", # Overwrite output file if it already exists + "-f", + "rawvideo", + "-vcodec", + "rawvideo", + "-s", + f"{width}x{height}", + "-pix_fmt", + "rgb48le", + "-r", + str(fps), + "-i", + "-", + "-c:v", + "prores_ks", + "-profile:v", + "4", + "-pix_fmt", + "yuva444p10le", + "-r", + str(fps), + "-y", # Overwrite output file if it already exists + output_file, + ] + + process = subprocess.Popen(command, stdin=subprocess.PIPE) + + for frame in frames: + process.stdin.write(frame.tobytes()) + + process.stdin.close() + process.wait() + + +if __name__ == "__main__": + default_output = f"./output_{uuid.uuid4()}.mov" + parser = argparse.ArgumentParser( + description="FILM frame interpolation", formatter_class=RichHelpFormatter + ) + parser.add_argument("inputs", nargs="*", help="Input image files") + parser.add_argument("--output", help="Output JSON file", default=default_output) + parser.add_argument("-v", "--verbose", action="store_true", help="Verbose mode") + parser.add_argument( + "--glob", help="Enable glob pattern matching", metavar="PATTERN" + ) + parser.add_argument( + "--interpolate", type=int, default=4, help="Time for interpolated frames" + ) + parser.add_argument("--fps", type=int, default=30, help="Out FPS") + align = 64 + block_width = 2 + block_height = 2 + + args = parser.parse_args() + + # - checks + if not args.glob and not args.inputs: + parser.error("Either --glob flag or inputs must be provided.") + if args.glob: + glob_pattern = args.glob + try: + pattern_path = str(Path(glob_pattern).expanduser().resolve()) + + if not any(glob.glob(pattern_path)): + raise ValueError(f"No files found for glob pattern: {glob_pattern}") + except Exception as e: + console = Console() + console.print( + f"[bold red]Error: Invalid glob pattern '{glob_pattern}': {e}[/bold red]" + ) + + exit(1) + else: + glob_pattern = None + + input_files: List[Path] = [] + + if glob_pattern: + input_files = [ + Path(p) + for p in list(glob.glob(str(Path(glob_pattern).expanduser().resolve()))) + ] + else: + input_files = [Path(p) for p in args.inputs] + + console = Console() + console.print("Input 
Files:", style="bold", end=" ") + console.print(f"{len(input_files):03d} files", style="cyan") + # for input_file in args.inputs: + # console.print(f"- {input_file}", style="cyan") + console.print("\nOutput File:", style="bold", end=" ") + console.print(f"{Path(args.output).resolve().absolute()}", style="cyan") + + with Progress(console=console, auto_refresh=True) as progress: + from frame_interpolation.eval import util + from frame_interpolation.eval import util, interpolator + + # files = Path(pth).rglob("*.png") + + model = interpolator.Interpolator( + "G:/MODELS/FILM/pretrained_models/film_net/Style", None + ) # [2,2] + + task = progress.add_task("[cyan]Interpolating frames...", total=1) + + frames = list( + util.interpolate_recursively_from_files( + [x.as_posix() for x in input_files], args.interpolate, model + ) + ) + + # mediapy.write_video(args.output, frames, fps=args.fps) + write_prores_444_video(args.output, frames, fps=args.fps) + progress.update(task, advance=1) + progress.refresh() diff --git a/node_list.json b/node_list.json new file mode 100644 index 0000000..42dc354 --- /dev/null +++ b/node_list.json @@ -0,0 +1,43 @@ +{ + "Animation Builder (mtb)": "Convenient way to manage basic animation maths at the core of many of my workflows", + "Bbox (mtb)": "The bounding box (BBOX) custom type used by other nodes", + "Bbox From Mask (mtb)": "From a mask extract the bounding box", + "Blur (mtb)": "Blur an image using a Gaussian filter.", + "Color Correct (mtb)": "Various color correction methods", + "Colored Image (mtb)": "Constant color image of given size", + "Concat Images (mtb)": "Add images to batch", + "Crop (mtb)": "Crops an image and an optional mask to a given bounding box\n\n The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type\n The BBOX input takes precedence over the tuple input\n ", + "Debug (mtb)": "Experimental node to debug any Comfy values, support for more types and widgets is planned", + "Deep Bump (mtb)": "Normal & height maps generation from single pictures", + "Export To Prores (mtb)": "Export to ProRes 4444 (Experimental)", + "Face Swap (mtb)": "Face swap using deepinsight/insightface models", + "Film Interpolation (mtb)": "Google Research FILM frame interpolation for large motion", + "Fit Number (mtb)": "Fit the input float using a source and target range", + "Float To Number (mtb)": "Node addon for the WAS Suite. Converts a \"comfy\" FLOAT to a NUMBER.", + "Get Batch From History (mtb)": "Very experimental node to load images from the history of the server.\n\n Queue items without output are ignore in the count.", + "Image Compare (mtb)": "Compare two images and return a difference image", + "Image Premultiply (mtb)": "Premultiply image with mask", + "Image Remove Background Rembg (mtb)": "Removes the background from the input using Rembg.", + "Image Resize Factor (mtb)": "Extracted mostly from WAS Node Suite, with a few edits (most notably multiple image support) and less features.", + "Int To Bool (mtb)": "Basic int to bool conversion", + "Int To Number (mtb)": "Node addon for the WAS Suite. 
Converts a \"comfy\" INT to a NUMBER.", + "Latent Lerp (mtb)": "Linear interpolation (blend) between two latent vectors", + "Latent Noise (mtb)": "Inject noise into latent space", + "Latent Transform (mtb)": "Dumb attempt at reproducing some deforum like motion", + "Load Face Enhance Model (mtb)": "Loads a GFPGan or RestoreFormer model for face enhancement.", + "Load Face Swap Model (mtb)": "Loads a faceswap model", + "Load Film Model (mtb)": "Loads a FILM model", + "Load Image From Url (mtb)": "Load an image from the given URL", + "Load Image Sequence (mtb)": "Load an image sequence from a folder. The current frame is used to determine which image to load.\n\n Usually used in conjunction with the `Primitive` node set to increment to load a sequence of images from a folder.\n Use -1 to load all matching frames as a batch.\n ", + "Mask To Image (mtb)": "Converts a mask (alpha) to an RGB image with a color and background", + "Qr Code (mtb)": "Basic QR Code generator", + "Restore Face (mtb)": "Uses GFPGan to restore faces", + "Save Gif (mtb)": "Save the images from the batch as a GIF", + "Save Image Grid (mtb)": "Save all the images in the input batch as a grid of images.", + "Save Image Sequence (mtb)": "Save an image sequence to a folder. The current frame is used to determine which image to save.\n\n This is merely a wrapper around the `save_images` function with formatting for the output folder and filename.\n ", + "Smart Step (mtb)": "Utils to control the steps start/stop of the KAdvancedSampler in percentage", + "String Replace (mtb)": "Basic string replacement", + "Styles Loader (mtb)": "Load csv files and populate a dropdown from the rows (\u00e0 la A111)", + "Text To Image (mtb)": "Utils to convert text to image using a font\n\n\n The tool looks for any .ttf file in the Comfy folder hierarchy.\n ", + "Uncrop (mtb)": "Uncrops an image to a given bounding box\n\n The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type\n The BBOX input takes precedence over the tuple input" +} \ No newline at end of file diff --git a/nodes/animation.py b/nodes/animation.py new file mode 100644 index 0000000..14d564e --- /dev/null +++ b/nodes/animation.py @@ -0,0 +1,47 @@ +from ..log import log + + +class AnimationBuilder: + """Convenient way to manage basic animation maths at the core of many of my workflows""" + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "total_frames": ("INT", {"default": 100, "min": 0}), + # "fps": ("INT", {"default": 12, "min": 0}), + "scale_float": ("FLOAT", {"default": 1.0, "min": 0.0}), + "loop_count": ("INT", {"default": 1, "min": 0}), + "raw_iteration": ("INT", {"default": 0, "min": 0}), + "raw_loop": ("INT", {"default": 0, "min": 0}), + }, + } + + RETURN_TYPES = ("INT", "FLOAT", "INT", "BOOL") + RETURN_NAMES = ("frame", "0-1 (scaled)", "count", "loop_ended") + CATEGORY = "mtb/animation" + FUNCTION = "build_animation" + + def build_animation( + self, + total_frames=100, + # fps=12, + scale_float=1.0, + loop_count=1, # set in js + raw_iteration=0, # set in js + raw_loop=0, # set in js + ): + frame = raw_iteration % (total_frames) + scaled = (frame / (total_frames - 1)) * scale_float + # if frame == 0: + # log.debug("Reseting history") + # PromptServer.instance.prompt_queue.wipe_history() + log.debug(f"frame: {frame}/{total_frames} scaled: {scaled}") + + return (frame, scaled, raw_loop, (frame == (total_frames - 1))) + + +__nodes__ = [AnimationBuilder] diff --git a/nodes/conditions.py 
b/nodes/conditions.py index 287fb7a..ffcf268 100644 --- a/nodes/conditions.py +++ b/nodes/conditions.py @@ -35,7 +35,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("INT", "INT", "INT") RETURN_NAMES = ("step", "start", "end") FUNCTION = "do_step" - CATEGORY = "conditioning" + CATEGORY = "mtb/conditioning" def do_step(self, step, start_percent, end_percent): start = int(step * start_percent / 100) @@ -92,7 +92,7 @@ def INPUT_TYPES(cls): } } - CATEGORY = "conditioning" + CATEGORY = "mtb/conditioning" RETURN_TYPES = ("STRING", "STRING") RETURN_NAMES = ("positive", "negative") @@ -168,7 +168,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) RETURN_NAMES = ("image",) FUNCTION = "text_to_image" - CATEGORY = "utils" + CATEGORY = "mtb/generate" def text_to_image( self, text, font, wrap, font_size, width, height, color, background diff --git a/nodes/crop.py b/nodes/crop.py index 2aa03f0..fc7751a 100644 --- a/nodes/crop.py +++ b/nodes/crop.py @@ -1,11 +1,14 @@ import torch -from ..utils import tensor2pil, pil2tensor -from PIL import Image, ImageFilter, ImageDraw +from ..utils import tensor2pil, pil2tensor, tensor2np, np2tensor +from PIL import Image, ImageFilter, ImageDraw, ImageChops import numpy as np +from ..log import log -class BoundingBox: + +class Bbox: """The bounding box (BBOX) custom type used by other nodes""" + def __init__(self): pass @@ -13,6 +16,7 @@ def __init__(self): def INPUT_TYPES(cls): return { "required": { + # "bbox": ("BBOX",), "x": ("INT", {"default": 0, "max": 10000000, "min": 0, "step": 1}), "y": ("INT", {"default": 0, "max": 10000000, "min": 0, "step": 1}), "width": ( @@ -28,14 +32,16 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("BBOX",) FUNCTION = "do_crop" - CATEGORY = "image/crop" + CATEGORY = "mtb/crop" - def do_crop(self, x, y, width, height): + def do_crop(self, x, y, width, height): # bbox return (x, y, width, height) + # return bbox -class BBoxFromMask: +class BboxFromMask: """From a mask extract the bounding box""" + def __init__(self): pass @@ -59,13 +65,25 @@ def INPUT_TYPES(cls): "image (optional)", ) FUNCTION = "extract_bounding_box" - CATEGORY = "image/crop" + CATEGORY = "mtb/crop" def extract_bounding_box(self, mask: torch.Tensor, image=None): + # if image != None: + # if mask.size(0) != image.size(0): + # if mask.size(0) != 1: + # log.error( + # f"Batch count mismatch for mask and image, it can either be 1 mask for X images, or X masks for X images (mask: {mask.shape} | image: {image.shape})" + # ) - mask = tensor2pil(mask) + # raise Exception( + # f"Batch count mismatch for mask and image, it can either be 1 mask for X images, or X masks for X images (mask: {mask.shape} | image: {image.shape})" + # ) + + _mask = tensor2pil(1.0 - mask)[0] + + # we invert it + alpha_channel = np.array(_mask) - alpha_channel = np.array(mask) non_zero_indices = np.nonzero(alpha_channel) min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) @@ -74,11 +92,16 @@ def extract_bounding_box(self, mask: torch.Tensor, image=None): # Create a bounding box tuple if image != None: # Convert the image to a NumPy array - image = image.numpy() - # Crop the image from the bounding box - image = image[:, min_y:max_y, min_x:max_x] - image = torch.from_numpy(image) - + imgs = tensor2np(image) + out = [] + for img in imgs: + # Crop the image from the bounding box + img = img[min_y:max_y, min_x:max_x, :] + log.debug(f"Cropped image to shape {img.shape}") + out.append(img) + + image = np2tensor(out) + log.debug(f"Cropped images shape: {image.shape}") bounding_box = (min_x, min_y, max_x 
- min_x, max_y - min_y) return ( bounding_box, @@ -92,6 +115,7 @@ class Crop: The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type The BBOX input takes precedence over the tuple input """ + def __init__(self): pass @@ -120,12 +144,11 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE", "MASK", "BBOX") FUNCTION = "do_crop" - CATEGORY = "image/crop" + CATEGORY = "mtb/crop" def do_crop( self, image: torch.Tensor, mask=None, x=0, y=0, width=256, height=256, bbox=None ): - image = image.numpy() if mask: mask = mask.numpy() @@ -144,11 +167,44 @@ def do_crop( ) +# def calculate_intersection(rect1, rect2): +# x_left = max(rect1[0], rect2[0]) +# y_top = max(rect1[1], rect2[1]) +# x_right = min(rect1[2], rect2[2]) +# y_bottom = min(rect1[3], rect2[3]) + +# return (x_left, y_top, x_right, y_bottom) + + +def bbox_check(bbox, target_size=None): + if not target_size: + return bbox + + new_bbox = ( + bbox[0], + bbox[1], + min(target_size[0] - bbox[0], bbox[2]), + min(target_size[1] - bbox[1], bbox[3]), + ) + if new_bbox != bbox: + log.warn(f"BBox too big, constrained to {new_bbox}") + + return new_bbox + + +def bbox_to_region(bbox, target_size=None): + bbox = bbox_check(bbox, target_size) + + # to region + return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]) + + class Uncrop: """Uncrops an image to a given bounding box The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type The BBOX input takes precedence over the tuple input""" + def __init__(self): pass @@ -169,7 +225,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "do_crop" - CATEGORY = "image/crop" + CATEGORY = "mtb/crop" def do_crop(self, image, crop_image, bbox, border_blending): def inset_border(image, border_width=20, border_color=(0)): @@ -182,41 +238,63 @@ def inset_border(image, border_width=20, border_color=(0)): ) return bordered_image - image = tensor2pil(image) - crop_img = tensor2pil(crop_image) - crop_img = crop_img.convert("RGB") + single = image.size(0) == 1 + if image.size(0) != crop_image.size(0): + if not single: + raise ValueError( + "The Image batch count is greater than 1, but doesn't match the crop_image batch count. 
If using batches they should either match or only crop_image must be greater than 1" + ) + + images = tensor2pil(image) + crop_imgs = tensor2pil(crop_image) + out_images = [] + for i, crop in enumerate(crop_imgs): + if single: + img = images[0] + else: + img = images[i] + + # uncrop the image based on the bounding box + bb_x, bb_y, bb_width, bb_height = bbox + + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + # log.debug(f"Paste region: {paste_region}") + # new_region = adjust_paste_region(img.size, paste_region) + # log.debug(f"Adjusted paste region: {new_region}") + # # Check if the adjusted paste region is different from the original + + crop_img = crop.convert("RGB") - # uncrop the image based on the bounding box - bb_x, bb_y, bb_width, bb_height = bbox + log.debug(f"Crop image size: {crop_img.size}") + log.debug(f"Image size: {img.size}") - if border_blending > 1.0: - border_blending = 1.0 - elif border_blending < 0.0: - border_blending = 0.0 + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 - blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) - blend = image.convert("RGBA") - mask = Image.new("L", image.size, 0) + blend = img.convert("RGBA") + mask = Image.new("L", img.size, 0) - mask_block = Image.new("L", (bb_width, bb_height), 255) - mask_block = inset_border(mask_block, int(blend_ratio / 2), (0)) + mask_block = Image.new("L", (bb_width, bb_height), 255) + mask_block = inset_border(mask_block, int(blend_ratio / 2), (0)) - mask.paste(mask_block, (bb_x, bb_y, bb_x + bb_width, bb_y + bb_height)) - blend.paste(crop_img, (bb_x, bb_y, bb_x + bb_width, bb_y + bb_height)) + mask.paste(mask_block, paste_region) + log.debug(f"Blend size: {blend.size} | kind {blend.mode}") + log.debug(f"Crop image size: {crop_img.size} | kind {crop_img.mode}") + log.debug(f"BBox: {paste_region}") + blend.paste(crop_img, paste_region) - mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) - mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) - blend.putalpha(mask) - image = Image.alpha_composite(image.convert("RGBA"), blend) + blend.putalpha(mask) + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) - return (pil2tensor(image.convert("RGB")),) + return (pil2tensor(out_images),) -__nodes__ = [ - BBoxFromMask, - BoundingBox, - Crop, - Uncrop -] \ No newline at end of file +__nodes__ = [BboxFromMask, Bbox, Crop, Uncrop] diff --git a/nodes/debug.py b/nodes/debug.py new file mode 100644 index 0000000..ba6bdb9 --- /dev/null +++ b/nodes/debug.py @@ -0,0 +1,57 @@ +from ..utils import tensor2pil +from ..log import log +import io, base64 +import torch + + +class Debug: + """Experimental node to debug any Comfy values, support for more types and widgets is planned""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": {"anything_1": ("*")}, + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "do_debug" + CATEGORY = "mtb/debug" + OUTPUT_NODE = True + + def do_debug(self, **kwargs): + output = { + "ui": {"b64_images": [], "text": []}, + "result": ("A"), + } + for k, v in kwargs.items(): + anything = v + text = "" + if isinstance(anything, torch.Tensor): + log.debug(f"Tensor: {anything.shape}") + + # write the images to temp + + image = 
tensor2pil(anything) + b64_imgs = [] + for im in image: + buffered = io.BytesIO() + im.save(buffered, format="JPEG") + b64_imgs.append( + "data:image/jpeg;base64," + + base64.b64encode(buffered.getvalue()).decode("utf-8") + ) + + output["ui"]["b64_images"] += b64_imgs + log.debug(f"Input {k} contains {len(b64_imgs)} images") + elif isinstance(anything, bool): + log.debug(f"Input {k} contains boolean: {anything}") + output["ui"]["text"] += ["True" if anything else "False"] + else: + text = str(anything) + log.debug(f"Input {k} contains text: {text}") + output["ui"]["text"] += [text] + + return output + + +__nodes__ = [Debug] diff --git a/nodes/deep_bump.py b/nodes/deep_bump.py index f9ece06..639aaa9 100644 --- a/nodes/deep_bump.py +++ b/nodes/deep_bump.py @@ -271,7 +271,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "apply" - CATEGORY = "image processing" + CATEGORY = "mtb/textures" def apply( self, diff --git a/nodes/faceenhance.py b/nodes/faceenhance.py index 33296eb..e2317af 100644 --- a/nodes/faceenhance.py +++ b/nodes/faceenhance.py @@ -16,6 +16,8 @@ class LoadFaceEnhanceModel: + """Loads a GFPGan or RestoreFormer model for face enhancement.""" + def __init__(self) -> None: pass @@ -42,7 +44,7 @@ def INPUT_TYPES(cls): [x.name for x in cls.get_models()], {"default": "None"}, ), - "upscale": ("INT", {"default": 2}), + "upscale": ("INT", {"default": 1}), }, "optional": {"bg_upsampler": ("UPSCALE_MODEL", {"default": None})}, } @@ -50,7 +52,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("FACEENHANCE_MODEL",) RETURN_NAMES = ("model",) FUNCTION = "load_model" - CATEGORY = "face" + CATEGORY = "mtb/facetools" def load_model(self, model_name, upscale=2, bg_upsampler=None): basic = "RestoreFormer" not in model_name @@ -111,19 +113,21 @@ def enhance(self, img: Image.Image, outscale=2): self.upscale_model.cpu() s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0) - return (tensor2np(s),) + return (tensor2np(s)[0],) import sys class RestoreFace: + """Uses GFPGan to restore faces""" + def __init__(self) -> None: pass RETURN_TYPES = ("IMAGE",) FUNCTION = "restore" - CATEGORY = "face" + CATEGORY = "mtb/facetools" @classmethod def INPUT_TYPES(cls): @@ -150,9 +154,8 @@ def do_restore( weight, save_tmp_steps, ) -> torch.Tensor: - pimage = tensor2pil(image) - width, height = pimage.size - + pimage = tensor2np(image)[0] + width, height = pimage.shape[1], pimage.shape[0] source_img = cv2.cvtColor(np.array(pimage), cv2.COLOR_RGB2BGR) sys.stdout = NullWriter() diff --git a/nodes/faceswap.py b/nodes/faceswap.py index 8dcf7ac..aa1fc25 100644 --- a/nodes/faceswap.py +++ b/nodes/faceswap.py @@ -46,7 +46,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("FACESWAP_MODEL",) FUNCTION = "load_model" - CATEGORY = "face" + CATEGORY = "mtb/facetools" def load_model(self, faceswap_model: str): model_path = os.path.join( @@ -88,7 +88,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "swap" - CATEGORY = "face" + CATEGORY = "mtb/facetools" def swap( self, @@ -100,8 +100,8 @@ def swap( ): def do_swap(img): model_management.throw_exception_if_processing_interrupted() - img = tensor2pil(img) - ref = tensor2pil(reference) + img = tensor2pil(img)[0] + ref = tensor2pil(reference)[0] face_ids = { int(x) for x in faces_index.strip(",").split(",") if x.isnumeric() } diff --git a/nodes/fun.py b/nodes/fun.py index a53341e..5661686 100644 --- a/nodes/fun.py +++ b/nodes/fun.py @@ -2,6 +2,53 @@ from ..utils import pil2tensor from PIL import Image +# class MtbExamples: +# """MTB Example Images""" + +# def __init__(self): 
+# pass + +# @classmethod +# @lru_cache(maxsize=1) +# def get_root(cls): +# return here / "examples" / "samples" + +# @classmethod +# def INPUT_TYPES(cls): +# input_dir = cls.get_root() +# files = [f.name for f in input_dir.iterdir() if f.is_file()] +# return { +# "required": {"image": (sorted(files),)}, +# } + +# RETURN_TYPES = ("IMAGE", "MASK") +# FUNCTION = "do_mtb_examples" +# CATEGORY = "fun" + +# def do_mtb_examples(self, image, index): +# image_path = (self.get_root() / image).as_posix() + +# i = Image.open(image_path) +# i = ImageOps.exif_transpose(i) +# image = i.convert("RGB") +# image = np.array(image).astype(np.float32) / 255.0 +# image = torch.from_numpy(image)[None,] +# if "A" in i.getbands(): +# mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0 +# mask = 1.0 - torch.from_numpy(mask) +# else: +# mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") +# return (image, mask) + +# @classmethod +# def IS_CHANGED(cls, image): +# image_path = (cls.get_root() / image).as_posix() + +# m = hashlib.sha256() +# with open(image_path, "rb") as f: +# m.update(f.read()) +# return m.digest().hex() + class QrCode: """Basic QR Code generator""" @@ -31,7 +78,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "do_qr" - CATEGORY = "fun" + CATEGORY = "mtb/generate" def do_qr(self, url, width, height, error_correct, box_size, border, invert): if error_correct == "L" or error_correct not in ["M", "Q", "H"]: @@ -63,4 +110,7 @@ def do_qr(self, url, width, height, error_correct, box_size, border, invert): return (pil2tensor(code),) -__nodes__ = [QrCode] +__nodes__ = [ + QrCode, + # MtbExamples, +] diff --git a/nodes/graph_utils.py b/nodes/graph_utils.py index dabb09c..e191fbc 100644 --- a/nodes/graph_utils.py +++ b/nodes/graph_utils.py @@ -1,69 +1,75 @@ -import torch -import folder_paths -import os +from ..log import log -class SaveTensors: - """Debug node that will probably be removed in the future""" +class StringReplace: + """Basic string replacement""" - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"forceInput": True}), + "old": ("STRING", {"default": ""}), + "new": ("STRING", {"default": ""}), + } + } + + FUNCTION = "replace_str" + RETURN_TYPES = ("STRING",) + CATEGORY = "mtb/string" + + def replace_str(self, string: str, old: str, new: str): + log.debug(f"Current string: {string}") + log.debug(f"Find string: {old}") + log.debug(f"Replace string: {new}") + + string = string.replace(old, new) + + log.debug(f"New string: {string}") + + return (string,) + + +class FitNumber: + """Fit the input float using a source and target range""" @classmethod def INPUT_TYPES(cls): return { "required": { - "filename_prefix": ("STRING", {"default": "ComfyPickle"}), - }, - "optional": { - "image": ("IMAGE",), - "mask": ("MASK",), - "latent": ("LATENT",), - }, + "value": ("FLOAT", {"default": 0, "forceInput": True}), + "clamp": ("BOOL", {"default": False}), + "source_min": ("FLOAT", {"default": 0.0}), + "source_max": ("FLOAT", {"default": 1.0}), + "target_min": ("FLOAT", {"default": 0.0}), + "target_max": ("FLOAT", {"default": 1.0}), + } } - FUNCTION = "save" - OUTPUT_NODE = True - RETURN_TYPES = () - CATEGORY = "utils" + FUNCTION = "set_range" + RETURN_TYPES = ("FLOAT",) + CATEGORY = "mtb/math" - def save( + def set_range( self, - filename_prefix, - image: torch.Tensor = None, - mask: torch.Tensor = None, - latent: torch.Tensor = None, + 
value: float, + clamp: bool, + source_min: float, + source_max: float, + target_min: float, + target_max: float, ): - ( - full_output_folder, - filename, - counter, - subfolder, - filename_prefix, - ) = folder_paths.get_save_image_path(filename_prefix, self.output_dir) - - if image is not None: - image_file = f"{filename}_image_{counter:05}.pt" - torch.save(image, os.path.join(full_output_folder, image_file)) - # np.save(os.path.join(full_output_folder, image_file), image.cpu().numpy()) - - if mask is not None: - mask_file = f"{filename}_mask_{counter:05}.pt" - torch.save(mask, os.path.join(full_output_folder, mask_file)) - # np.save(os.path.join(full_output_folder, mask_file), mask.cpu().numpy()) - - if latent is not None: - # for latent we must use pickle - latent_file = f"{filename}_latent_{counter:05}.pt" - torch.save(latent, os.path.join(full_output_folder, latent_file)) - # pickle.dump(latent, open(os.path.join(full_output_folder, latent_file), "wb")) - - # np.save(os.path.join(full_output_folder, latent_file), latent[""].cpu().numpy()) - - return f"{filename_prefix}_{counter:05}" - - -__nodes__ = [ - SaveTensors, -] + res = target_min + (target_max - target_min) * (value - source_min) / ( + source_max - source_min + ) + + if clamp: + if target_min > target_max: + res = max(min(res, target_min), target_max) + else: + res = max(min(res, target_max), target_min) + + return (res,) + + +__nodes__ = [StringReplace, FitNumber] diff --git a/nodes/image_interpolation.py b/nodes/image_interpolation.py index 1f94a5a..aa91a32 100644 --- a/nodes/image_interpolation.py +++ b/nodes/image_interpolation.py @@ -7,13 +7,100 @@ import torch from frame_interpolation.eval import util, interpolator from ..utils import tensor2np -import uuid import numpy as np -import subprocess import comfy - +from PIL import Image +import urllib.request +import urllib.parse +import json import tensorflow as tf import comfy.model_management as model_management +import io + +from comfy.cli_args import args +from ..utils import pil2tensor + + +def get_image(filename, subfolder, folder_type): + data = {"filename": filename, "subfolder": subfolder, "type": folder_type} + url_values = urllib.parse.urlencode(data) + with urllib.request.urlopen( + "http://{}:{}/view?{}".format(args.listen, args.port, url_values) + ) as response: + return io.BytesIO(response.read()) + + +class GetBatchFromHistory: + """Very experimental node to load images from the history of the server. 
+ + Queue items without output are ignore in the count.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "enable": ("BOOL", {"default": True}), + "count": ("INT", {"default": 1, "min": 0}), + "offset": ("INT", {"default": 0, "min": -1e9, "max": 1e9}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = "images" + CATEGORY = "mtb/animation" + FUNCTION = "load_from_history" + + def load_from_history( + self, + enable=True, + count=0, + offset=0, + ): + if not enable or count == 0: + log.debug("Load from history is disabled for this iteration") + return (torch.zeros(0),) + frames = [] + + with urllib.request.urlopen( + "http://{}:{}/history".format(args.listen, args.port) + ) as response: + history = json.loads(response.read()) + + output_images = [] + for k, run in history.items(): + for o in run["outputs"]: + for node_id in run["outputs"]: + node_output = run["outputs"][node_id] + if "images" in node_output: + images_output = [] + for image in node_output["images"]: + image_data = get_image( + image["filename"], image["subfolder"], image["type"] + ) + images_output.append(image_data) + output_images.extend(images_output) + if len(output_images) == 0: + return (torch.zeros(0),) + for i, image in enumerate(list(reversed(output_images))): + if i < offset: + continue + if i >= offset + count: + break + # Decode image as tensor + img = Image.open(image) + log.debug(f"Image from history {i} of shape {img.size}") + frames.append(img) + + # Display the shape of the tensor + # print("Tensor shape:", image_tensor.shape) + + # return (output_images,) + + output = pil2tensor( + list(reversed(frames)), + ) + + return (output,) class LoadFilmModel: @@ -39,7 +126,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("FILM_MODEL",) FUNCTION = "load_model" - CATEGORY = "face" + CATEGORY = "mtb/frame iterpolation" def load_model(self, film_model: str): model_path = Path(folder_paths.models_dir) / "FILM" / film_model @@ -73,7 +160,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "do_interpolation" - CATEGORY = "animation" + CATEGORY = "mtb/frame iterpolation" def do_interpolation( self, @@ -82,6 +169,10 @@ def do_interpolation( film_model: interpolator.Interpolator, ): n = images.size(0) + # check if images is an empty tensor and return it... 
+ if n == 0: + return (images,) + # check if tensorflow GPU is available available_gpus = tf.config.list_physical_devices("GPU") if not len(available_gpus): @@ -124,7 +215,7 @@ def __init__(self): RETURN_TYPES = ("IMAGE",) FUNCTION = "concat_images" - CATEGORY = "animation" + CATEGORY = "mtb/image" @classmethod def INPUT_TYPES(cls): @@ -156,87 +247,9 @@ def concat_images(self, imageA: torch.Tensor, imageB: torch.Tensor): return (self.concatenate_tensors(imageA, imageB),) -class ExportToProRes: - """Export to ProRes 4444 (Experimental)""" - - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "images": ("IMAGE",), - # "frames": ("FRAMES",), - "fps": ("FLOAT", {"default": 24, "min": 1}), - "prefix": ("STRING", {"default": "export"}), - } - } - - RETURN_TYPES = ("VIDEO",) - OUTPUT_NODE = True - FUNCTION = "export_prores" - CATEGORY = "animation" - - def export_prores( - self, - images: torch.Tensor, - fps: float, - prefix: str, - ): - output_dir = Path(folder_paths.get_output_directory()) - id = f"{prefix}_{uuid.uuid4()}.mov" - - log.debug(f"Exporting to {output_dir / id}") - - frames = tensor2np(images) - log.debug(f"Frames type {type(frames)}") - log.debug(f"Exporting {len(frames)} frames") - - frames = [frame.astype(np.uint16) * 257 for frame in frames] - - height, width, _ = frames[0].shape - - out_path = (output_dir / id).as_posix() - - # Prepare the FFmpeg command - command = [ - "ffmpeg", - "-y", - "-f", - "rawvideo", - "-vcodec", - "rawvideo", - "-s", - f"{width}x{height}", - "-pix_fmt", - "rgb48le", - "-r", - str(fps), - "-i", - "-", - "-c:v", - "prores_ks", - "-profile:v", - "4", - "-pix_fmt", - "yuva444p10le", - "-r", - str(fps), - "-y", - out_path, - ] - - process = subprocess.Popen(command, stdin=subprocess.PIPE) - - for frame in frames: - model_management.throw_exception_if_processing_interrupted() - process.stdin.write(frame.tobytes()) - - process.stdin.close() - process.wait() - - return (out_path,) - - -__nodes__ = [LoadFilmModel, FilmInterpolation, ExportToProRes, ConcatImages] +__nodes__ = [ + LoadFilmModel, + FilmInterpolation, + ConcatImages, + GetBatchFromHistory, +] diff --git a/nodes/image_processing.py b/nodes/image_processing.py index e277dd1..ad302be 100644 --- a/nodes/image_processing.py +++ b/nodes/image_processing.py @@ -68,7 +68,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "correct" - CATEGORY = "image/postprocessing" + CATEGORY = "mtb/image processing" @staticmethod def gamma_correction_tensor(image, gamma): @@ -90,19 +90,21 @@ def offset_adjustment_tensor(image, offset): @staticmethod def hsv_adjustment(image: torch.Tensor, hue, saturation, value): - image = tensor2pil(image) - hsv_image = image.convert("HSV") + images = tensor2pil(image) + out = [] + for img in images: + hsv_image = img.convert("HSV") - h, s, v = hsv_image.split() + h, s, v = hsv_image.split() - h = h.point(lambda x: (x + hue * 255) % 256) - s = s.point(lambda x: int(x * saturation)) - v = v.point(lambda x: int(x * value)) + h = h.point(lambda x: (x + hue * 255) % 256) + s = s.point(lambda x: int(x * saturation)) + v = v.point(lambda x: int(x * value)) - hsv_image = Image.merge("HSV", (h, s, v)) - rgb_image = hsv_image.convert("RGB") - - return pil2tensor(rgb_image) + hsv_image = Image.merge("HSV", (h, s, v)) + rgb_image = hsv_image.convert("RGB") + out.append(rgb_image) + return pil2tensor(out) @staticmethod def hsv_adjustment_tensor_not_working(image: torch.Tensor, hue, saturation, value): @@ -182,64 +184,6 @@ def correct( 
return (image,) -class HsvToRgb: - """Convert HSV image to RGB""" - - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image": ("IMAGE",), - } - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "convert" - CATEGORY = "image/postprocessing" - - def convert(self, image): - image = image.numpy() - - image = image.squeeze() - # image = image.transpose(1,2,3,0) - image = hsv2rgb(image) - image = np.expand_dims(image, axis=0) - - # image = image.transpose(3,0,1,2) - return (torch.from_numpy(image),) - - -class RgbToHsv: - """Convert RGB image to HSV""" - - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image": ("IMAGE",), - } - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "convert" - CATEGORY = "image/postprocessing" - - def convert(self, image): - image = image.numpy() - - image = np.squeeze(image) - image = rgb2hsv(image) - image = np.expand_dims(image, axis=0) - - return (torch.from_numpy(image),) - - class ImageCompare: """Compare two images and return a difference image""" @@ -261,7 +205,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "compare" - CATEGORY = "image" + CATEGORY = "mtb/image" def compare(self, imageA: torch.Tensor, imageB: torch.Tensor, mode): imageA = imageA.numpy() @@ -276,35 +220,33 @@ def compare(self, imageA: torch.Tensor, imageB: torch.Tensor, mode): return (torch.from_numpy(image),) -class Denoise: - """Denoise an image using total variation minimization.""" +import requests - def __init__(self): - pass + +class LoadImageFromUrl: + """Load an image from the given URL""" @classmethod def INPUT_TYPES(cls): return { "required": { - "image": ("IMAGE",), - "weight": ( - "FLOAT", - {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}, + "url": ( + "STRING", + { + "default": "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Example.jpg/800px-Example.jpg" + }, ), } } RETURN_TYPES = ("IMAGE",) - FUNCTION = "denoise" - CATEGORY = "image/postprocessing" - - def denoise(self, image: torch.Tensor, weight): - image = image.numpy() - image = image.squeeze() - image = denoise_tv_chambolle(image, weight=weight) + FUNCTION = "load" + CATEGORY = "mtb/IO" - image = np.expand_dims(image, axis=0) - return (torch.from_numpy(image),) + def load(self, url): + # get the image from the url + image = Image.open(requests.get(url, stream=True).raw) + return (pil2tensor(image),) class Blur: @@ -331,7 +273,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("IMAGE",) FUNCTION = "blur" - CATEGORY = "image/postprocessing" + CATEGORY = "mtb/image processing" def blur(self, image: torch.Tensor, sigmaX, sigmaY): image = image.numpy() @@ -342,29 +284,29 @@ def blur(self, image: torch.Tensor, sigmaX, sigmaY): # https://github.com/lllyasviel/AdverseCleaner/blob/main/clean.py -def deglaze_np_img(np_img): - y = np_img.copy() - for _ in range(64): - y = cv2.bilateralFilter(y, 5, 8, 8) - for _ in range(4): - y = guidedFilter(np_img, y, 4, 16) - return y +# def deglaze_np_img(np_img): +# y = np_img.copy() +# for _ in range(64): +# y = cv2.bilateralFilter(y, 5, 8, 8) +# for _ in range(4): +# y = guidedFilter(np_img, y, 4, 16) +# return y -class DeglazeImage: - """Remove adversarial noise from images""" +# class DeglazeImage: +# """Remove adversarial noise from images""" - @classmethod - def INPUT_TYPES(cls): - return {"required": {"image": ("IMAGE",)}} +# @classmethod +# def INPUT_TYPES(cls): +# return {"required": {"image": ("IMAGE",)}} - CATEGORY = "image" +# CATEGORY = "mtb/image processing" - 
RETURN_TYPES = ("IMAGE",) - FUNCTION = "deglaze_image" +# RETURN_TYPES = ("IMAGE",) +# FUNCTION = "deglaze_image" - def deglaze_image(self, image): - return (np2tensor(deglaze_np_img(tensor2np(image))),) +# def deglaze_image(self, image): +# return (np2tensor(deglaze_np_img(tensor2np(image))),) class MaskToImage: @@ -383,7 +325,7 @@ def INPUT_TYPES(cls): } } - CATEGORY = "image/mask" + CATEGORY = "mtb/generate" RETURN_TYPES = ("IMAGE",) @@ -424,7 +366,7 @@ def INPUT_TYPES(cls): } } - CATEGORY = "image" + CATEGORY = "mtb/generate" RETURN_TYPES = ("IMAGE",) @@ -454,32 +396,42 @@ def INPUT_TYPES(cls): } } - CATEGORY = "image" + CATEGORY = "mtb/image" RETURN_TYPES = ("IMAGE",) FUNCTION = "premultiply" def premultiply(self, image, mask, invert): invert = invert == "True" - image = tensor2pil(image) - mask = tensor2pil(mask).convert("L") + images = tensor2pil(image) if invert: - mask = ImageChops.invert(mask) + masks = tensor2pil(mask) # .convert("L") + else: + masks = tensor2pil(1.0 - mask) + + single = False + if len(mask) == 1: + single = True - image.putalpha(mask) + masks = [x.convert("L") for x in masks] + + out = [] + for i, img in enumerate(images): + cur_mask = masks[0] if single else masks[i] + + img.putalpha(cur_mask) + out.append(img) # if invert: # image = Image.composite(image,Image.new("RGBA", image.size, color=(0,0,0,0)), mask) # else: # image = Image.composite(Image.new("RGBA", image.size, color=(0,0,0,0)), image, mask) - return (pil2tensor(image),) + return (pil2tensor(out),) class ImageResizeFactor: - """ - Extracted mostly from WAS Node Suite, with a few edits (most notably multiple image support) and less features. - """ + """Extracted mostly from WAS Node Suite, with a few edits (most notably multiple image support) and less features.""" def __init__(self): pass @@ -504,7 +456,7 @@ def INPUT_TYPES(cls): }, } - CATEGORY = "image" + CATEGORY = "mtb/image" RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "resize" @@ -624,7 +576,7 @@ def INPUT_TYPES(cls): OUTPUT_NODE = True - CATEGORY = "image" + CATEGORY = "mtb/IO" def create_image_grid(self, image_list): total_images = len(image_list) @@ -706,15 +658,13 @@ def save_images( __nodes__ = [ ColorCorrect, - HsvToRgb, - RgbToHsv, ImageCompare, - Denoise, Blur, - DeglazeImage, + # DeglazeImage, MaskToImage, ColoredImage, ImagePremultiply, ImageResizeFactor, SaveImageGrid, + LoadImageFromUrl, ] diff --git a/nodes/io.py b/nodes/io.py new file mode 100644 index 0000000..873d284 --- /dev/null +++ b/nodes/io.py @@ -0,0 +1,171 @@ +from ..utils import tensor2np +import uuid +import folder_paths +from ..log import log +import comfy.model_management as model_management +import subprocess +import torch +from pathlib import Path +import numpy as np + + +class ExportToProres: + """Export to ProRes 4444 (Experimental)""" + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + # "frames": ("FRAMES",), + "fps": ("FLOAT", {"default": 24, "min": 1}), + "prefix": ("STRING", {"default": "export"}), + } + } + + RETURN_TYPES = ("VIDEO",) + OUTPUT_NODE = True + FUNCTION = "export_prores" + CATEGORY = "mtb/IO" + + def export_prores( + self, + images: torch.Tensor, + fps: float, + prefix: str, + ): + if images.size(0) == 0: + return ("",) + output_dir = Path(folder_paths.get_output_directory()) + id = f"{prefix}_{uuid.uuid4()}.mov" + + log.debug(f"Exporting to {output_dir / id}") + + frames = tensor2np(images) + log.debug(f"Frames type {type(frames[0])}") + log.debug(f"Exporting {len(frames)} 
frames") + + frames = [frame.astype(np.uint16) * 257 for frame in frames] + + height, width, _ = frames[0].shape + + out_path = (output_dir / id).as_posix() + + # Prepare the FFmpeg command + command = [ + "ffmpeg", + "-y", + "-f", + "rawvideo", + "-vcodec", + "rawvideo", + "-s", + f"{width}x{height}", + "-pix_fmt", + "rgb48le", + "-r", + str(fps), + "-i", + "-", + "-c:v", + "prores_ks", + "-profile:v", + "4", + "-pix_fmt", + "yuva444p10le", + "-r", + str(fps), + "-y", + out_path, + ] + + process = subprocess.Popen(command, stdin=subprocess.PIPE) + + for frame in frames: + model_management.throw_exception_if_processing_interrupted() + process.stdin.write(frame.tobytes()) + + process.stdin.close() + process.wait() + + return (out_path,) + + +class SaveGif: + """Save the images from the batch as a GIF""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "fps": ("INT", {"default": 12, "min": 1, "max": 120}), + "resize_by": ("FLOAT", {"default": 1.0, "min": 0.1}), + "pingpong": ("BOOL", {"default": False}), + } + } + + RETURN_TYPES = () + OUTPUT_NODE = True + CATEGORY = "mtb/IO" + FUNCTION = "save_gif" + + def save_gif(self, image, fps=12, resize_by=1.0, pingpong=False): + if image.size(0) == 0: + return ("",) + + images = tensor2np(image) + images = [frame.astype(np.uint8) for frame in images] + if pingpong: + reversed_frames = images[::-1] + images.extend(reversed_frames) + + height, width, _ = image[0].shape + + ruuid = uuid.uuid4() + + ruuid = ruuid.hex[:10] + + out_path = f"{folder_paths.output_directory}/{ruuid}.gif" + + log.debug(f"Saving a gif file {width}x{height} as {ruuid}.gif") + + # Prepare the FFmpeg command + command = [ + "ffmpeg", + "-y", + "-f", + "rawvideo", + "-vcodec", + "rawvideo", + "-s", + f"{width}x{height}", + "-pix_fmt", + "rgb24", # GIF only supports rgb24 + "-r", + str(fps), + "-i", + "-", + "-vf", + f"fps={fps},scale={width * resize_by}:-1", # Set frame rate and resize if necessary + "-y", + out_path, + ] + + process = subprocess.Popen(command, stdin=subprocess.PIPE) + + for frame in images: + model_management.throw_exception_if_processing_interrupted() + process.stdin.write(frame.tobytes()) + + process.stdin.close() + process.wait() + results = [] + results.append({"filename": f"{ruuid}.gif", "subfolder": "", "type": "output"}) + return {"ui": {"gif": results}} + + +__nodes__ = [SaveGif, ExportToProres] diff --git a/nodes/latent_processing.py b/nodes/latent_processing.py index 93d35dd..63e3ad1 100644 --- a/nodes/latent_processing.py +++ b/nodes/latent_processing.py @@ -18,7 +18,7 @@ def INPUT_TYPES(cls): RETURN_TYPES = ("LATENT",) FUNCTION = "lerp_latent" - CATEGORY = "latent" + CATEGORY = "mtb/latent" def lerp_latent(self, A, B, t): a = A.copy() diff --git a/nodes/mask.py b/nodes/mask.py index 828478d..6745dfe 100644 --- a/nodes/mask.py +++ b/nodes/mask.py @@ -1,8 +1,12 @@ from rembg import remove from ..utils import pil2tensor, tensor2pil from PIL import Image +import comfy.utils + class ImageRemoveBackgroundRembg: + """Removes the background from the input using Rembg.""" + def __init__(self): pass @@ -11,24 +15,67 @@ def INPUT_TYPES(cls): return { "required": { "image": ("IMAGE",), - "alpha_matting": (["True","False"], {"default":"False"},), - "alpha_matting_foreground_threshold": ("INT", {"default":240, "min": 0, "max": 255},), - "alpha_matting_background_threshold": ("INT", {"default":10, "min": 0, "max": 255},), - "alpha_matting_erode_size": ("INT", {"default":10, "min": 0, "max": 255},), - "post_process_mask": 
(["True","False"], {"default":"False"},), - "bgcolor": ("COLOR", {"default":"black"},), - + "alpha_matting": ( + ["True", "False"], + {"default": "False"}, + ), + "alpha_matting_foreground_threshold": ( + "INT", + {"default": 240, "min": 0, "max": 255}, + ), + "alpha_matting_background_threshold": ( + "INT", + {"default": 10, "min": 0, "max": 255}, + ), + "alpha_matting_erode_size": ( + "INT", + {"default": 10, "min": 0, "max": 255}, + ), + "post_process_mask": ( + ["True", "False"], + {"default": "False"}, + ), + "bgcolor": ( + "COLOR", + {"default": "black"}, + ), }, } - RETURN_TYPES = ("IMAGE","MASK","IMAGE",) - RETURN_NAMES = ("Image (rgba)","Mask","Image",) + RETURN_TYPES = ( + "IMAGE", + "MASK", + "IMAGE", + ) + RETURN_NAMES = ( + "Image (rgba)", + "Mask", + "Image", + ) FUNCTION = "remove_background" - CATEGORY = "image" + CATEGORY = "mtb/image" + # bgcolor: Optional[Tuple[int, int, int, int]] - def remove_background(self, image, alpha_matting, alpha_matting_foreground_threshold, alpha_matting_background_threshold, alpha_matting_erode_size, post_process_mask, bgcolor): - image = remove( - data=tensor2pil(image), + def remove_background( + self, + image, + alpha_matting, + alpha_matting_foreground_threshold, + alpha_matting_background_threshold, + alpha_matting_erode_size, + post_process_mask, + bgcolor, + ): + pbar = comfy.utils.ProgressBar(image.size(0)) + images = tensor2pil(image) + + out_img = [] + out_mask = [] + out_img_on_bg = [] + + for img in images: + img_rm = remove( + data=img, alpha_matting=alpha_matting == "True", alpha_matting_foreground_threshold=alpha_matting_foreground_threshold, alpha_matting_background_threshold=alpha_matting_background_threshold, @@ -36,22 +83,26 @@ def remove_background(self, image, alpha_matting, alpha_matting_foreground_thres session=None, only_mask=False, post_process_mask=post_process_mask == "True", - bgcolor=None + bgcolor=None, ) - - - # extract the alpha to a new image - mask = image.getchannel(3) - - # add our bgcolor behind the image - image_on_bg = Image.new("RGBA", image.size, bgcolor) - - image_on_bg.paste(image, mask=mask) - - - return (pil2tensor(image), pil2tensor(mask), pil2tensor(image_on_bg)) + + # extract the alpha to a new image + mask = img_rm.getchannel(3) + + # add our bgcolor behind the image + image_on_bg = Image.new("RGBA", img_rm.size, bgcolor) + + image_on_bg.paste(img_rm, mask=mask) + + out_img.append(img_rm) + out_mask.append(mask) + out_img_on_bg.append(image_on_bg) + + pbar.update(1) + + return (pil2tensor(out_img), pil2tensor(out_mask), pil2tensor(out_img_on_bg)) __nodes__ = [ ImageRemoveBackgroundRembg, -] \ No newline at end of file +] diff --git a/nodes/number.py b/nodes/number.py index cf6c6a1..529c15d 100644 --- a/nodes/number.py +++ b/nodes/number.py @@ -1,3 +1,27 @@ +class IntToBool: + """Basic int to bool conversion""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "int": ( + "INT", + { + "default": 0, + }, + ), + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "int_to_bool" + CATEGORY = "mtb/number" + + def int_to_bool(self, int): + return (bool(int),) + + class IntToNumber: """Node addon for the WAS Suite. 
Converts a "comfy" INT to a NUMBER.""" @@ -8,19 +32,64 @@ def __init__(self): def INPUT_TYPES(cls): return { "required": { - "int": ("INT", {"default": 0, "min": 0, "max": 1e9, "step": 1}), + "int": ( + "INT", + { + "default": 0, + "min": -1e9, + "max": 1e9, + "step": 1, + "forceInput": True, + }, + ), } } RETURN_TYPES = ("NUMBER",) FUNCTION = "int_to_number" - CATEGORY = "number" + CATEGORY = "mtb/number" def int_to_number(self, int): + return (int,) + + +class FloatToNumber: + """Node addon for the WAS Suite. Converts a "comfy" FLOAT to a NUMBER.""" + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "float": ( + "FLOAT", + { + "default": 0, + "min": -1e9, + "max": 1e9, + "step": 1, + "forceInput": True, + }, + ), + } + } + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "float_to_number" + CATEGORY = "mtb/number" + + def float_to_number(self, float): + return (float,) + + return (int,) __nodes__ = [ + FloatToNumber, + IntToBool, IntToNumber, ] \ No newline at end of file diff --git a/nodes/video.py b/nodes/video.py index 86b25f9..7e1505c 100644 --- a/nodes/video.py +++ b/nodes/video.py @@ -10,96 +10,189 @@ import json from ..log import log + + class LoadImageSequence: """Load an image sequence from a folder. The current frame is used to determine which image to load. Usually used in conjunction with the `Primitive` node set to increment to load a sequence of images from a folder. + Use -1 to load all matching frames as a batch. """ + @classmethod def INPUT_TYPES(cls): return { "required": { - "path": ("STRING",{"default":"videos/####.png"}), - "current_frame": ("INT",{"default":0, "min":0, "max": 9999999},), + "path": ("STRING", {"default": "videos/####.png"}), + "current_frame": ( + "INT", + {"default": 0, "min": -1, "max": 9999999}, + ), } } - CATEGORY = "video" + CATEGORY = "mtb/IO" FUNCTION = "load_image" - RETURN_TYPES = ("IMAGE", "MASK", "INT",) - RETURN_NAMES = ("image", "mask", "current_frame",) + RETURN_TYPES = ( + "IMAGE", + "MASK", + "INT", + ) + RETURN_NAMES = ( + "image", + "mask", + "current_frame", + ) def load_image(self, path=None, current_frame=0): + load_all = current_frame == -1 + + if load_all: + log.debug(f"Loading all frames from {path}") + frames = resolve_all_frames(path) + log.debug(f"Found {len(frames)} frames") + + imgs = [] + masks = [] + + for frame in frames: + img, mask = img_from_path(frame) + imgs.append(img) + masks.append(mask) + + out_img = torch.cat(imgs, dim=0) + out_mask = torch.cat(masks, dim=0) + + return ( + out_img, + out_mask, + ) + log.debug(f"Loading image: {path}, {current_frame}") print(f"Loading image: {path}, {current_frame}") resolved_path = resolve_path(path, current_frame) image_path = folder_paths.get_annotated_filepath(resolved_path) - i = Image.open(image_path) - i = ImageOps.exif_transpose(i) - image = i.convert("RGB") - image = np.array(image).astype(np.float32) / 255.0 - image = torch.from_numpy(image)[None,] - if 'A' in i.getbands(): - mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 - mask = 1. 
- torch.from_numpy(mask) - else: - mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") - return (image, mask, current_frame,) + image, mask = img_from_path(image_path) + return ( + image, + mask, + current_frame, + ) @staticmethod def IS_CHANGED(path="", current_frame=0): print(f"Checking if changed: {path}, {current_frame}") resolved_path = resolve_path(path, current_frame) image_path = folder_paths.get_annotated_filepath(resolved_path) - if os.path.exists(image_path): + if os.path.exists(image_path): m = hashlib.sha256() - with open(image_path, 'rb') as f: + with open(image_path, "rb") as f: m.update(f.read()) return m.digest().hex() return "NONE" # @staticmethod # def VALIDATE_INPUTS(path="", current_frame=0): - + # print(f"Validating inputs: {path}, {current_frame}") # resolved_path = resolve_path(path, current_frame) # if not folder_paths.exists_annotated_filepath(resolved_path): # return f"Invalid image file: {resolved_path}" # return True + +import glob + + +def img_from_path(path): + img = Image.open(path) + img = ImageOps.exif_transpose(img) + image = img.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if "A" in img.getbands(): + mask = np.array(img.getchannel("A")).astype(np.float32) / 255.0 + mask = 1.0 - torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return ( + image, + mask, + ) + + +def resolve_all_frames(pattern): + folder_path, file_pattern = os.path.split(pattern) + + log.debug(f"Resolving all frames in {folder_path}") + frames = [] + hash_count = file_pattern.count("#") + frame_pattern = re.sub(r"#+", "*", file_pattern) + + log.debug(f"Found pattern: {frame_pattern}") + + matching_files = glob.glob(os.path.join(folder_path, frame_pattern)) + + log.debug(f"Found {len(matching_files)} matching files") + + frame_regex = re.escape(file_pattern).replace(r"\#", r"(\d+)") + + frame_number_regex = re.compile(frame_regex) + + for file in matching_files: + match = frame_number_regex.search(file) + if match: + frame_number = match.group(1) + log.debug(f"Found frame number: {frame_number}") + # resolved_file = pattern.replace("*" * frame_number.count("#"), frame_number) + frames.append(file) + + frames.sort() # Sort frames alphabetically + return frames + + def resolve_path(path, frame): hashes = path.count("#") padded_number = str(frame).zfill(hashes) return re.sub("#+", padded_number, path) + class SaveImageSequence: """Save an image sequence to a folder. The current frame is used to determine which image to save. This is merely a wrapper around the `save_images` function with formatting for the output folder and filename. 
""" + def __init__(self): self.output_dir = folder_paths.get_output_directory() self.type = "output" - @classmethod def INPUT_TYPES(cls): - return {"required": { - "images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "Sequence"}), - "current_frame": ("INT", {"default": 0, "min": 0, "max": 9999999}), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } + return { + "required": { + "images": ("IMAGE",), + "filename_prefix": ("STRING", {"default": "Sequence"}), + "current_frame": ("INT", {"default": 0, "min": 0, "max": 9999999}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } RETURN_TYPES = () FUNCTION = "save_images" OUTPUT_NODE = True - CATEGORY = "image" + CATEGORY = "mtb/IO" - def save_images(self, images, filename_prefix="Sequence", current_frame=0, prompt=None, extra_pnginfo=None): + def save_images( + self, + images, + filename_prefix="Sequence", + current_frame=0, + prompt=None, + extra_pnginfo=None, + ): # full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) # results = list() # for image in images: @@ -120,30 +213,39 @@ def save_images(self, images, filename_prefix="Sequence", current_frame=0, promp # "type": self.type # }) # counter += 1 - + if len(images) > 1: raise ValueError("Can only save one image at a time") - + resolved_path = Path(self.output_dir) / filename_prefix resolved_path.mkdir(parents=True, exist_ok=True) - + resolved_img = resolved_path / f"{filename_prefix}_{current_frame:05}.png" - + output_image = images[0].cpu().numpy() - img = Image.fromarray(np.clip(output_image * 255., 0, 255).astype(np.uint8)) + img = Image.fromarray(np.clip(output_image * 255.0, 0, 255).astype(np.uint8)) metadata = PngInfo() if prompt is not None: metadata.add_text("prompt", json.dumps(prompt)) if extra_pnginfo is not None: for x in extra_pnginfo: metadata.add_text(x, json.dumps(extra_pnginfo[x])) - + img.save(resolved_img, pnginfo=metadata, compress_level=4) - return { "ui": { "images": [ { "filename": resolved_img.name, "subfolder": resolved_path.name, "type": self.type } ] } } - - - + return { + "ui": { + "images": [ + { + "filename": resolved_img.name, + "subfolder": resolved_path.name, + "type": self.type, + } + ] + } + } + + __nodes__ = [ LoadImageSequence, SaveImageSequence, -] \ No newline at end of file +] diff --git a/requirements-wheels.txt b/requirements-wheels.txt index 9a2f34a..91e7cb1 100644 --- a/requirements-wheels.txt +++ b/requirements-wheels.txt @@ -1,3 +1,5 @@ insightface==0.7.3 mmcv==2.0.0 -mmdet==3.0.0 \ No newline at end of file +mmdet==3.0.0 +facexlib==0.3.0 +basicsr==1.4.2 diff --git a/requirements.txt b/requirements.txt index abb3097..3b936dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,12 +2,7 @@ onnxruntime-gpu==1.15.1 imageio===2.28.1 qrcode[pil] numpy==1.23.5 -insightface==0.7.3 -mmcv==2.0.0 -mmdet==3.0.0 rembg==2.0.37 -facexlib==0.3.0 -basicsr==1.4.2 # on windows non WSL 2.10 is the last version with GPU support tensorflow<2.11.0; platform_system == "Windows" tb-nightly==2.12.0a20230126; platform_system == "Windows" diff --git a/scripts/a111_extract.py b/scripts/a111_extract.py new file mode 100644 index 0000000..7e38b0b --- /dev/null +++ b/scripts/a111_extract.py @@ -0,0 +1,112 @@ +from pathlib import Path +from PIL import Image +from PIL.PngImagePlugin import PngImageFile, PngInfo +import json +from pprint import pprint +import argparse +from rich.console 
import Console +from rich.progress import Progress +from rich_argparse import RichHelpFormatter + + +def parse_a111(params, verbose=False): + # params = [p.split(": ") for p in params.split("\n")] + params = params.split("\n") + + prompt = params[0].strip() + neg = params[1].split(":")[1].strip() + + settings = {} + try: + settings = { + s.split(":")[0].strip(): s.split(":")[1].strip() + for s in params[2].split(",") + } + + except IndexError: + settings = {"raw": params[2].strip()} + + if verbose: + print(f"PROMPT: {prompt}") + print(f"NEG: {neg}") + print("SETTINGS:") + pprint(settings, indent=4) + + return {"prompt": prompt, "negative": neg, "settings": settings} + + +import glob + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Crude metadata extractor from A111 pngs", + formatter_class=RichHelpFormatter + ) + parser.add_argument("inputs", nargs="*", help="Input image files") + parser.add_argument("--output", help="Output JSON file") + parser.add_argument("-v", "--verbose", action="store_true", help="Verbose mode") + parser.add_argument( + "--glob", help="Enable glob pattern matching", metavar="PATTERN" + ) + + args = parser.parse_args() + + # - checks + if not args.glob and not args.inputs: + parser.error("Either --glob flag or inputs must be provided.") + if args.glob: + glob_pattern = args.glob + try: + pattern_path = str(Path(glob_pattern).expanduser().resolve()) + + if not any(glob.glob(pattern_path)): + raise ValueError(f"No files found for glob pattern: {glob_pattern}") + except Exception as e: + console = Console() + console.print( + f"[bold red]Error: Invalid glob pattern '{glob_pattern}': {e}[/bold red]" + ) + + exit(1) + else: + glob_pattern = None + + input_files = [] + + if glob_pattern: + input_files = list(glob.glob(str(Path(glob_pattern).expanduser().resolve()))) + else: + input_files = [Path(p) for p in args.inputs] + + console = Console() + console.print("Input Files:", style="bold", end=" ") + console.print(f"{len(input_files):03d} files", style="cyan") + # for input_file in args.inputs: + # console.print(f"- {input_file}", style="cyan") + console.print("\nOutput File:", style="bold", end=" ") + console.print(f"{Path(args.output).resolve().absolute()}", style="cyan") + + with Progress(console=console, auto_refresh=True) as progress: + # files = Path(pth).rglob("*.png") + unique_info = {} + last = None + + task = progress.add_task("[cyan]Extracting meta...", total=len(input_files) + 1) + for p in input_files: + im = Image.open(p) + parsed = parse_a111(im.info["parameters"], args.verbose) + + if parsed != last: + unique_info[Path(p).stem] = parsed + + last = parsed + progress.update(task, advance=1) + progress.refresh() + + unique_info = json.dumps(unique_info, indent=4) + with open(args.output, "w") as f: + f.write(unique_info) + progress.update(task, advance=1) + progress.refresh() + + console.print("\nProcessing completed!", style="bold green") diff --git a/scripts/comfy_meta.py b/scripts/comfy_meta.py new file mode 100644 index 0000000..cb9cb1e --- /dev/null +++ b/scripts/comfy_meta.py @@ -0,0 +1,213 @@ +import argparse +import json +from PIL import Image, PngImagePlugin +from rich.console import Console +from rich import print +from rich_argparse import RichHelpFormatter +import os +from pathlib import Path + +console = Console() + +# BNK_CutoffSetRegions +# BNK_CutoffRegionsToConditioning +# BNK_CutoffBasePrompt + + +# Extracts metadata from a PNG image and returns it as a dictionary +def extract_metadata(image_path): + image = 
Image.open(image_path) + prompt = image.info.get("prompt", "") + workflow = image.info.get("workflow", "") + + if workflow: + workflow = json.loads(workflow) + + if prompt: + prompt = json.loads(prompt) + + console.print(f"Metadata extracted from [cyan]{image_path}[/cyan].") + + return { + "prompt": prompt, + "workflow": workflow, + } + + +# Embeds metadata into a PNG image +def embed_metadata(image_path, metadata): + image = Image.open(image_path) + o_metadata = image.info + + pnginfo = PngImagePlugin.PngInfo() + if prompt := metadata.get("prompt"): + pnginfo.add_text("prompt", json.dumps(prompt)) + elif "prompt" in o_metadata: + pnginfo.add_text("prompt", o_metadata["prompt"]) + + if workflow := metadata.get("workflow"): + pnginfo.add_text("workflow", json.dumps(workflow)) + elif "workflow" in o_metadata: + pnginfo.add_text("workflow", o_metadata["workflow"]) + + imgp = Path(image_path) + output = imgp.with_stem(f"{imgp.stem}_comfy_embed") + index = 1 + while output.exists(): + output = imgp.with_stem(f"{imgp.stem}_{index}_comfy_embed").with_suffix(".png") + index += 1 + + image.save(output, pnginfo=pnginfo) + console.print(f"Metadata embedded into [cyan]{output}[/cyan].") + + +# CLI subcommand: extract +def extract(args): + input_files = [] + for input_path in args.input: + if os.path.isdir(input_path): + folder_path = input_path + input_files.extend( + [ + os.path.join(folder_path, file_name) + for file_name in os.listdir(folder_path) + if file_name.lower().endswith((".png", ".jpg", ".jpeg")) + ] + ) + else: + input_files.append(input_path) + + if len(input_files) == 1: + metadata = extract_metadata(input_files[0]) + if args.print_output: + print(json.dumps(metadata, indent=4)) + else: + if not args.output: + output = Path(input_files[0]).with_suffix(".json") + index = 1 + while output.exists(): + output = ( + Path(input_files[0]) + .with_stem(f"{Path(input_files[0]).stem}_{index}") + .with_suffix(".json") + ) + index += 1 + else: + output = args.output + with open(output, "w") as file: + json.dump(metadata, file, indent=4) + console.print(f"Metadata extracted and saved to [cyan]{output}[/cyan].") + else: + metadata_dict = {} + for input_file in input_files: + metadata = extract_metadata(input_file) + filename = os.path.basename(input_file) + output = ( + Path(args.output) / f"{filename}.json" + if args.output + else Path(input_file).with_suffix(".json") + ) + index = 1 + while output.exists(): + output = Path(args.output).parent / f"{filename}_{index}.json" + index += 1 + with open(output, "w") as file: + json.dump(metadata, file, indent=4) + metadata_dict[filename] = metadata + if args.output: + with open(args.output, "w") as file: + json.dump(metadata_dict, file, indent=4) + console.print( + f"Metadata extracted and saved to [cyan]{args.output}[/cyan]." 
+ ) + else: + console.print("Multiple metadata files created.") + + +# CLI subcommand: embed +def embed(args): + input_files = [] + for input_path in args.input: + if os.path.isdir(input_path): + folder_path = input_path + input_files.extend( + [ + os.path.join(folder_path, file_name) + for file_name in os.listdir(folder_path) + if file_name.lower().endswith(".json") + ] + ) + else: + input_files.append(input_path) + + for input_file in input_files: + with open(input_file) as file: + metadata = json.load(file) + image_path = input_file.replace(".json", ".png") + if args.output: + output_dir = args.output + if os.path.isdir(output_dir): + output_path = os.path.join(output_dir, os.path.basename(image_path)) + index = 1 + while os.path.exists(output_path): + output_path = os.path.join( + output_dir, + f"{os.path.basename(image_path)}_{index}.png", + ) + index += 1 + else: + output_path = output_dir + else: + output_path = image_path.replace(".png", "_comfy_embed.png") + + embed_metadata(image_path, metadata) + # os.rename(image_path, output_path) + console.print(f"Metadata embedded into [cyan]{output_path}[/cyan].") + + +if __name__ == "__main__": + # Create the main CLI parser + parser = argparse.ArgumentParser( + prog="image-metadata-cli", formatter_class=RichHelpFormatter + ) + subparsers = parser.add_subparsers(title="subcommands") + + # Parser for the "extract" subcommand + extract_parser = subparsers.add_parser( + "extract", + help="Extract metadata from PNG image(s) or folder", + formatter_class=RichHelpFormatter, + ) + extract_parser.add_argument( + "input", nargs="+", help="Input PNG image file(s) or folder path" + ) + extract_parser.add_argument( + "--print", + dest="print_output", + action="store_true", + help="Print the output to stdout", + ) + extract_parser.add_argument("--output", help="Output JSON file(s) or directory") + extract_parser.set_defaults(func=extract) + + # Parser for the "embed" subcommand + embed_parser = subparsers.add_parser( + "embed", + help="Embed metadata into PNG image(s) or folder", + formatter_class=RichHelpFormatter, + ) + embed_parser.add_argument( + "input", nargs="+", help="Input JSON file(s) or folder path" + ) + embed_parser.add_argument("--output", help="Output PNG image file(s) or directory") + embed_parser.set_defaults(func=embed) + + # Parse the command-line arguments and execute the appropriate subcommand + args = parser.parse_args() + if hasattr(args, "func"): + try: + args.func(args) + except ValueError as e: + console.print(f"[bold red]Error:[/bold red] {str(e)}") + else: + parser.print_help() diff --git a/utils.py b/utils.py index ce94de4..9bc7788 100644 --- a/utils.py +++ b/utils.py @@ -5,6 +5,7 @@ import sys from typing import Union, List +from .log import log def add_path(path, prepend=False): @@ -45,16 +46,22 @@ def add_path(path, prepend=False): add_path((comfy_dir / "custom_nodes")) -def tensor2pil(image: torch.Tensor) -> Union[Image.Image, List[Image.Image]]: +def tensor2pil(image: torch.Tensor) -> List[Image.Image]: batch_count = 1 if len(image.shape) > 3: batch_count = image.size(0) - if batch_count == 1: - return Image.fromarray( + if batch_count > 1: + out = [] + for i in range(batch_count): + out.extend(tensor2pil(image[i])) + return out + + return [ + Image.fromarray( np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) ) - return [tensor2pil(image[i]) for i in range(batch_count)] + ] def pil2tensor(image: Image.Image | List[Image.Image]) -> torch.Tensor: @@ -71,10 +78,14 @@ def np2tensor(img_np: np.ndarray 
| List[np.ndarray]) -> torch.Tensor: return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0) -def tensor2np(tensor: torch.Tensor) -> Union[np.ndarray, List[np.ndarray]]: +def tensor2np(tensor: torch.Tensor) -> List[np.ndarray]: batch_count = 1 if len(tensor.shape) > 3: batch_count = tensor.size(0) if batch_count > 1: - return [tensor2np(tensor[i]) for i in range(batch_count)] - return np.clip(255.0 * tensor.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + out = [] + for i in range(batch_count): + out.extend(tensor2np(tensor[i])) + return out + + return [np.clip(255.0 * tensor.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)] diff --git a/web/color_widget.js b/web/color_widget.js deleted file mode 100644 index 4bdca72..0000000 --- a/web/color_widget.js +++ /dev/null @@ -1,194 +0,0 @@ -// Define the Color Picker widget class -import parseCss from '/extensions/mtb/extern/parse-css.js' -import { app } from "/scripts/app.js"; -import { ComfyWidgets } from "/scripts/widgets.js"; - -export function CUSTOM_INT(node, inputName, val, func, config = {}) { - return { - widget: node.addWidget( - "number", - inputName, - val, - func, - Object.assign({}, { min: 0, max: 4096, step: 640, precision: 0 }, config) - ), - }; -} -const dumb_call = (v, d, node) => { - console.log("dumb_call", { v, d, node }); -} -function isColorBright(rgb, threshold = 240) { - const brightess = getBrightness(rgb) - - return brightess > threshold -} - -function getBrightness(rgbObj) { - return Math.round(((parseInt(rgbObj[0]) * 299) + (parseInt(rgbObj[1]) * 587) + (parseInt(rgbObj[2]) * 114)) / 1000) -} - - -/** - * @returns {import("/types/litegraph").IWidget} widget - */ -const custom = (key, val, compute = false) => { - /** @type {import("/types/litegraph").IWidget} */ - const widget = {} - // widget.y = 0; - widget.name = key; - widget.type = "COLOR"; - widget.options = { default: "#ff0000" }; - widget.value = val || "#ff0000"; - widget.draw = function (ctx, - node, - widgetWidth, - widgetY, - height) { - const border = 3; - - // draw a rect with a border and a fill color - ctx.fillStyle = "#000"; - ctx.fillRect(0, widgetY, widgetWidth, height); - ctx.fillStyle = this.value; - ctx.fillRect(border, widgetY + border, widgetWidth - border * 2, height - border * 2); - // write the input name - // choose the fill based on the luminoisty of this.value color - const color = parseCss(this.value.default || this.value) - if (!color) { - return - } - ctx.fillStyle = isColorBright(color.values, 125) ? 
"#000" : "#fff"; - - - ctx.font = "14px Arial"; - ctx.textAlign = "center"; - ctx.fillText(this.name, widgetWidth * 0.5, widgetY + 14); - - - - // ctx.strokeStyle = "#fff"; - // ctx.strokeRect(border, widgetY + border, widgetWidth - border * 2, height - border * 2); - - - // ctx.fillStyle = "#000"; - // ctx.fillRect(widgetWidth/2 - border / 2 , widgetY + border / 2 , widgetWidth/2 + border / 2, height + border / 2); - // ctx.fillStyle = this.value; - // ctx.fillRect(widgetWidth/2, widgetY, widgetWidth/2, height); - - } - widget.mouse = function (e, pos, node) { - if (e.type === "pointerdown") { - // get widgets of type type : "COLOR" - const widgets = node.widgets.filter(w => w.type === "COLOR"); - - for (const w of widgets) { - // color picker - const rect = [w.last_y, w.last_y + 32]; - if (pos[1] > rect[0] && pos[1] < rect[1]) { - console.log("color picker", node) - const picker = document.createElement("input"); - picker.type = "color"; - picker.value = this.value; - // picker.style.position = "absolute"; - // picker.style.left = ( pos[0]) + "px"; - // picker.style.top = ( pos[1]) + "px"; - - // place at screen center - // picker.style.position = "absolute"; - // picker.style.left = (window.innerWidth / 2) + "px"; - // picker.style.top = (window.innerHeight / 2) + "px"; - // picker.style.transform = "translate(-50%, -50%)"; - // picker.style.zIndex = 1000; - - - - document.body.appendChild(picker); - - picker.addEventListener("change", () => { - this.value = picker.value; - node.graph._version++; - node.setDirtyCanvas(true, true); - document.body.removeChild(picker); - }); - - // simulate click with screen center - const pointer_event = new MouseEvent('click', { - bubbles: false, - // cancelable: true, - pointerType: "mouse", - clientX: window.innerWidth / 2, - clientY: window.innerHeight / 2, - x: window.innerWidth / 2, - y: window.innerHeight / 2, - offsetX: window.innerWidth / 2, - offsetY: window.innerHeight / 2, - screenX: window.innerWidth / 2, - screenY: window.innerHeight / 2, - - - }); - console.log(e) - picker.dispatchEvent(pointer_event); - - } - } - } - } - widget.computeSize = function (width) { - return [width, 32]; - } - - return widget; -} - -app.registerExtension({ - name: "mtb.ColorPicker", - - async beforeRegisterNodeDef(nodeType, nodeData, app) { - - //console.log("mtb.ColorPicker", { nodeType, nodeData, app }); - const rinputs = nodeData.input?.required; // object with key/value pairs, "0" is the type - // console.log(nodeData.name, { nodeType, nodeData, app }); - - if (!rinputs) return; - - - let has_color = false; - for (const [key, input] of Object.entries(rinputs)) { - if (input[0] === "COLOR") { - has_color = true; - // input[1] = { default: "#ff0000" }; - - } - } - - if (!has_color) return; - - const onNodeCreated = nodeType.prototype.onNodeCreated; - nodeType.prototype.onNodeCreated = function () { - const r = onNodeCreated ? 
onNodeCreated.apply(this, arguments) : undefined; - this.serialize_widgets = true; - // if (rinputs[0] === "COLOR") { - // console.log(nodeData.name, { nodeType, nodeData, app }); - - // loop through the inputs to find the color inputs - for (const [key, input] of Object.entries(rinputs)) { - if (input[0] === "COLOR") { - let widget = custom(key, input[1]) - - this.addCustomWidget(widget) - } - // } - } - - this.onRemoved = function () { - // When removing this node we need to remove the input from the DOM - for (let y in this.widgets) { - if (this.widgets[y].canvas) { - this.widgets[y].canvas.remove(); - } - } - }; - } - } -}); diff --git a/web/comfy_shared.js b/web/comfy_shared.js new file mode 100644 index 0000000..f6a2232 --- /dev/null +++ b/web/comfy_shared.js @@ -0,0 +1,282 @@ +import { app } from "/scripts/app.js"; + +export const log = (...args) => { + if (window.MTB_DEBUG) { + console.debug(...args); + } + +} + +//- WIDGET UTILS +export const CONVERTED_TYPE = "converted-widget"; + +export function offsetDOMWidget(widget, ctx, node, widgetWidth, widgetY, height) { + const margin = 10; + const elRect = ctx.canvas.getBoundingClientRect(); + const transform = new DOMMatrix() + .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height) + .multiplySelf(ctx.getTransform()) + .translateSelf(margin, margin + widgetY); + + const scale = new DOMMatrix().scaleSelf(transform.a, transform.d) + Object.assign(widget.inputEl.style, { + transformOrigin: "0 0", + transform: scale, + left: `${transform.a + transform.e}px`, + top: `${transform.d + transform.f}px`, + width: `${widgetWidth - (margin * 2)}px`, + // height: `${(widget.parent?.inputHeight || 32) - (margin * 2)}px`, + height: `${(height || widget.parent?.inputHeight || 32) - (margin * 2)}px`, + + position: "absolute", + background: (!node.color) ? '' : node.color, + color: (!node.color) ? '' : 'white', + zIndex: app.graph._nodes.indexOf(node), + }) +} + +/** + * Extracts the type and link type from a widget config object. + * @param {*} config + * @returns + */ +export function getWidgetType(config) { + // Special handling for COMBO so we restrict links based on the entries + let type = config[0]; + let linkType = type; + if (type instanceof Array) { + type = "COMBO"; + linkType = linkType.join(","); + } + return { type, linkType }; +} + +export const dynamic_connection = (node, index, connected, connectionPrefix = "input_", connectionType = "PSDLAYER") => { + + // remove all non connected inputs + if (!connected && node.inputs.length > 1) { + log(`Removing input ${index} (${node.inputs[index].name})`) + if (node.widgets) { + const w = node.widgets.find((w) => w.name === node.inputs[index].name); + if (w) { + w.onRemove?.(); + node.widgets.length = node.widgets.length - 1 + } + } + node.removeInput(index) + + // make inputs sequential again + for (let i = 0; i < node.inputs.length; i++) { + node.inputs[i].label = `${connectionPrefix}${i + 1}` + } + } + + // add an extra input + if (node.inputs[node.inputs.length - 1].link != undefined) { + log(`Adding input ${node.inputs.length + 1} (${connectionPrefix}${node.inputs.length + 1})`) + + node.addInput(`${connectionPrefix}${node.inputs.length + 1}`, connectionType) + } + +} + + +/** + * Appends a callback to the extra menu options of a given node type. 
+ * @param {*} nodeType + * @param {*} cb + */ +export function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +export function hideWidget(node, widget, suffix = "") { + widget.origType = widget.type; + widget.hidden = true + widget.origComputeSize = widget.computeSize; + widget.origSerializeValue = widget.serializeValue; + widget.computeSize = () => [0, -4]; // -4 is due to the gap litegraph adds between widgets automatically + widget.type = CONVERTED_TYPE + suffix; + widget.serializeValue = () => { + // Prevent serializing the widget if we have no input linked + const { link } = node.inputs.find((i) => i.widget?.name === widget.name); + if (link == null) { + return undefined; + } + return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + }; + + // Hide any linked widgets, e.g. seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidget(node, w, ":" + widget.name); + } + } +} + +export function showWidget(widget) { + widget.type = widget.origType; + widget.computeSize = widget.origComputeSize; + widget.serializeValue = widget.origSerializeValue; + + delete widget.origType; + delete widget.origComputeSize; + delete widget.origSerializeValue; + + // Hide any linked widgets, e.g. seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + showWidget(w); + } + } +} + +export function convertToWidget(node, widget) { + showWidget(widget); + const sz = node.size; + node.removeInput(node.inputs.findIndex((i) => i.widget?.name === widget.name)); + + for (const widget of node.widgets) { + widget.last_y -= LiteGraph.NODE_SLOT_HEIGHT; + } + + // Restore original size but grow if needed + node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])]); +} + + +export function convertToInput(node, widget, config) { + hideWidget(node, widget); + + const { linkType } = getWidgetType(config); + + // Add input and store widget config for creating on primitive node + const sz = node.size; + node.addInput(widget.name, linkType, { + widget: { name: widget.name, config }, + }); + + for (const widget of node.widgets) { + widget.last_y += LiteGraph.NODE_SLOT_HEIGHT; + } + + // Restore original size but grow if needed + node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])]); +} + +export function hideWidgetForGood(node, widget, suffix = "") { + widget.origType = widget.type; + widget.origComputeSize = widget.computeSize; + widget.origSerializeValue = widget.serializeValue; + widget.computeSize = () => [0, -4]; // -4 is due to the gap litegraph adds between widgets automatically + widget.type = CONVERTED_TYPE + suffix; + // widget.serializeValue = () => { + // // Prevent serializing the widget if we have no input linked + // const w = node.inputs?.find((i) => i.widget?.name === widget.name); + // if (w?.link == null) { + // return undefined; + // } + // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + // }; + + // Hide any linked widgets, e.g. 
seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidgetForGood(node, w, ":" + widget.name); + } + } +} + +export function fixWidgets(node) { + if (node.inputs) { + for (const input of node.inputs) { + log(input) + if (input.widget || node.widgets) { + // if (newTypes.includes(input.type)) { + const matching_widget = node.widgets.find((w) => w.name === input.name); + if (matching_widget) { + + + // if (matching_widget.hidden) { + // log(`Already hidden skipping ${matching_widget.name}`) + // continue + // } + const w = node.widgets.find((w) => w.name === matching_widget.name); + if (w && w.type != CONVERTED_TYPE) { + log(w) + log(`hidding ${w.name}(${w.type}) from ${node.type}`) + log(node) + hideWidget(node, w); + } else { + log(`converting to widget ${w}`) + + convertToWidget(node, input) + } + } + } + } + } +} +export function inner_value_change(widget, value, event = undefined) { + if (widget.type == "number" || widget.type == "BBOX") { + value = Number(value); + } else if (widget.type == "BOOL") { + value = Boolean(value) + } + widget.value = value; + if (widget.options && widget.options.property && node.properties[widget.options.property] !== undefined) { + node.setProperty(widget.options.property, value); + } + if (widget.callback) { + widget.callback(widget.value, app.canvas, node, pos, event); + } +} + +//- COLOR UTILS +export function isColorBright(rgb, threshold = 240) { + const brightess = getBrightness(rgb) + return brightess > threshold +} + +function getBrightness(rgbObj) { + return Math.round(((parseInt(rgbObj[0]) * 299) + (parseInt(rgbObj[1]) * 587) + (parseInt(rgbObj[2]) * 114)) / 1000) +} + +//- HTML / CSS UTILS +export function defineClass(className, classStyles) { + const styleSheets = document.styleSheets; + + // Helper function to check if the class exists in a style sheet + function classExistsInStyleSheet(styleSheet) { + const rules = styleSheet.rules || styleSheet.cssRules; + for (const rule of rules) { + if (rule.selectorText === `.${className}`) { + return true; + } + } + return false; + } + + // Check if the class is already defined in any of the style sheets + let classExists = false; + for (const styleSheet of styleSheets) { + if (classExistsInStyleSheet(styleSheet)) { + classExists = true; + break; + } + } + + // If the class doesn't exist, add the new class definition to the first style sheet + if (!classExists) { + if (styleSheets[0].insertRule) { + styleSheets[0].insertRule(`.${className} { ${classStyles} }`, 0); + } else if (styleSheets[0].addRule) { + styleSheets[0].addRule(`.${className}`, classStyles, 0); + } + } +} diff --git a/web/debug.js b/web/debug.js new file mode 100644 index 0000000..00079e1 --- /dev/null +++ b/web/debug.js @@ -0,0 +1,80 @@ +import { app } from "/scripts/app.js"; +import * as shared from '/extensions/mtb/comfy_shared.js' +import { log } from '/extensions/mtb/comfy_shared.js' +import { MtbWidgets } from '/extensions/mtb/mtb_widgets.js' + +// TODO: respect inputs order... + + + +app.registerExtension({ + name: "mtb.Debug", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "Debug (mtb)") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + const r = onConnectionsChange ? 
onConnectionsChange.apply(this, arguments) : undefined; + // TODO: remove all widgets on disconnect once computed + shared.dynamic_connection(this, index, connected, "anything_", "*") + + //- infer type + if (link_info) { + const fromNode = this.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id); + const type = fromNode.outputs[link_info.origin_slot].type; + this.inputs[index].type = type; + // this.inputs[index].label = type.toLowerCase() + } + //- restore dynamic input + if (!connected) { + this.inputs[index].type = "*"; + this.inputs[index].label = `anything_${index + 1}` + } + } + + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + log(message) + onExecuted?.apply(this, arguments); + log(message) + if (this.widgets) { + // const pos = this.widgets.findIndex((w) => w.name === "anything_1"); + // if (pos !== -1) { + for (let i = 0; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = 0; + + } + let widgetI = 1 + if (message.text) { + for (const txt of message.text) { + const w = this.addCustomWidget(MtbWidgets.DEBUG_STRING(txt, widgetI)) + w.parent = this; + widgetI++; + } + } + if (message.b64_images) { + for (const img of message.b64_images) { + const w = this.addCustomWidget(MtbWidgets.DEBUG_IMG(img, widgetI)) + w.parent = this; + widgetI++; + } + // this.onResize?.(this.size); + // this.resize?.(this.size) + this.setSize(this.computeSize()) + }; + + this.onRemoved = function () { + // When removing this node we need to remove the input from the DOM + for (let y in this.widgets) { + if (this.widgets[y].canvas) { + this.widgets[y].canvas.remove(); + } + this.widgets[y].onRemove?.(); + } + } + } + } + } +} +); diff --git a/web/mtb_widgets.js b/web/mtb_widgets.js new file mode 100644 index 0000000..3261636 --- /dev/null +++ b/web/mtb_widgets.js @@ -0,0 +1,1081 @@ +import { app } from "/scripts/app.js"; +import parseCss from '/extensions/mtb/extern/parse-css.js' +import * as shared from '/extensions/mtb/comfy_shared.js' + +import { api } from "/scripts/api.js"; + +import { ComfyWidgets } from "/scripts/widgets.js"; + +const newTypes = ["BOOL", "COLOR", "BBOX"] + + +export const MtbWidgets = { + BBOX: (key, val) => { + /** @type {import("./types/litegraph").IWidget} */ + const widget = { + name: key, + type: "BBOX", + // options: val, + y: 0, + value: val?.default || [0, 0, 0, 0], + options: {}, + + draw: function (ctx, + node, + widget_width, + widgetY, + height) { + const hide = this.type !== "BBOX" && app.canvas.ds.scale > 0.5; + + const show_text = true; + const outline_color = LiteGraph.WIDGET_OUTLINE_COLOR; + const background_color = LiteGraph.WIDGET_BGCOLOR; + const text_color = LiteGraph.WIDGET_TEXT_COLOR; + const secondary_text_color = LiteGraph.WIDGET_SECONDARY_TEXT_COLOR; + const H = LiteGraph.NODE_WIDGET_HEIGHT; + + var margin = 15; + var numWidgets = 4; // Number of stacked widgets + + if (hide) return; + + for (let i = 0; i < numWidgets; i++) { + let currentY = widgetY + i * (H + margin); // Adjust Y position for each widget + + ctx.textAlign = "left"; + ctx.strokeStyle = outline_color; + ctx.fillStyle = background_color; + ctx.beginPath(); + if (show_text) + ctx.roundRect(margin, currentY, widget_width - margin * 2, H, [H * 0.5]); + else + ctx.rect(margin, currentY, widget_width - margin * 2, H); + ctx.fill(); + if (show_text) { + if (!this.disabled) + ctx.stroke(); + ctx.fillStyle = text_color; + if (!this.disabled) { + ctx.beginPath(); + ctx.moveTo(margin + 
16, currentY + 5); + ctx.lineTo(margin + 6, currentY + H * 0.5); + ctx.lineTo(margin + 16, currentY + H - 5); + ctx.fill(); + ctx.beginPath(); + ctx.moveTo(widget_width - margin - 16, currentY + 5); + ctx.lineTo(widget_width - margin - 6, currentY + H * 0.5); + ctx.lineTo(widget_width - margin - 16, currentY + H - 5); + ctx.fill(); + } + ctx.fillStyle = secondary_text_color; + ctx.fillText(this.label || this.name, margin * 2 + 5, currentY + H * 0.7); + ctx.fillStyle = text_color; + ctx.textAlign = "right"; + + ctx.fillText( + Number(this.value).toFixed( + this.options?.precision !== undefined + ? this.options.precision + : 3 + ), + widget_width - margin * 2 - 20, + currentY + H * 0.7 + ); + } + } + }, + mouse: function (event, pos, node) { + var old_value = this.value; + var x = pos[0] - node.pos[0]; + var y = pos[1] - node.pos[1]; + var width = node.size[0]; + var H = LiteGraph.NODE_WIDGET_HEIGHT; + var margin = 5; + var numWidgets = 4; // Number of stacked widgets + + for (let i = 0; i < numWidgets; i++) { + let currentY = y + i * (H + margin); // Adjust Y position for each widget + + + if (event.type == LiteGraph.pointerevents_method + "move" && this.type == "BBOX") { + if (event.deltaX) + this.value += event.deltaX * 0.1 * (this.options?.step || 1); + if (this.options.min != null && this.value < this.options.min) { + this.value = this.options.min; + } + if (this.options.max != null && this.value > this.options.max) { + this.value = this.options.max; + } + } else if (event.type == LiteGraph.pointerevents_method + "down") { + var values = this.options?.values; + if (values && values.constructor === Function) { + values = this.options.values(w, node); + } + var values_list = null; + + var delta = x < 40 ? -1 : x > widget_width - 40 ? 1 : 0; + if (this.type == "BBOX") { + this.value += delta * 0.1 * (this.options.step || 1); + if (this.options.min != null && this.value < this.options.min) { + this.value = this.options.min; + } + if (this.options.max != null && this.value > this.options.max) { + this.value = this.options.max; + } + } else if (delta) { //clicked in arrow, used for combos + var index = -1; + this.last_mouseclick = 0; //avoids dobl click event + if (values.constructor === Object) + index = values_list.indexOf(String(this.value)) + delta; + else + index = values_list.indexOf(this.value) + delta; + if (index >= values_list.length) { + index = values_list.length - 1; + } + if (index < 0) { + index = 0; + } + if (values.constructor === Array) + this.value = values[index]; + else + this.value = index; + } + } //end mousedown + else if (event.type == LiteGraph.pointerevents_method + "up" && this.type == "BBOX") { + var delta = x < 40 ? -1 : x > widget_width - 40 ? 
1 : 0; + if (event.click_time < 200 && delta == 0) { + this.prompt("Value", this.value, function (v) { + // check if v is a valid equation or a number + if (/^[0-9+\-*/()\s]+|\d+\.\d+$/.test(v)) { + try {//solve the equation if possible + v = eval(v); + } catch (e) { } + } + this.value = Number(v); + shared.inner_value_change(this, this.value, event); + }.bind(w), + event); + } + } + + if (old_value != this.value) + setTimeout( + function () { + shared.inner_value_change(this, this.value, event); + }.bind(this), + 20 + ); + + app.canvas.setDirty(true); + } + + }, + computeSize: function (width) { + return [width, LiteGraph.NODE_WIDGET_HEIGHT * 4]; + }, + // onDrawBackground: function (ctx) { + // if (!this.flags.collapsed) return; + // this.inputEl.style.display = "block"; + // this.inputEl.style.top = this.graphcanvas.offsetTop + this.pos[1] + "px"; + // this.inputEl.style.left = this.graphcanvas.offsetLeft + this.pos[0] + "px"; + // }, + // onInputChange: function (e) { + // const property = e.target.dataset.property; + // const bbox = this.getInputData(0); + // if (!bbox) return; + // bbox[property] = parseFloat(e.target.value); + // this.setOutputData(0, bbox); + // } + } + + widget.desc = "Represents a Bounding Box with x, y, width, and height."; + return widget + + }, + BOOL: (key, val, compute = false) => { + /** @type {import("/types/litegraph").IWidget} */ + const widget = { + name: key, + type: "BOOL", + options: { default: false }, + y: 0, + value: val || false, + draw: function (ctx, + node, + widget_width, + widgetY, + height) { + const hide = this.type !== "BOOL" && app.canvas.ds.scale > 0.5 + if (hide) { + return + } + const outline_color = LiteGraph.WIDGET_OUTLINE_COLOR; + const background_color = LiteGraph.WIDGET_BGCOLOR; + const text_color = LiteGraph.WIDGET_TEXT_COLOR; + const H = LiteGraph.NODE_WIDGET_HEIGHT; + const arrowSize = 8; + + var margin = 15; + if (hide) return; + + var currentY = widgetY; + + ctx.textAlign = "left"; + ctx.strokeStyle = outline_color; + ctx.fillStyle = background_color; + ctx.beginPath(); + // ctx.roundRect(margin, currentY, widget_width - margin * 2, H, [H * 0.5]); + ctx.rect(margin, currentY, H, H); // Draw checkbox square + + ctx.fill(); + ctx.stroke(); + + ctx.fillStyle = text_color; + // ctx.fillText(this.label || this.name, margin * 2 + 5, currentY + H * 0.7); + ctx.fillText(this.label || this.name, H + margin * 2, currentY + H * 0.7); + + + // Draw arrow if the value is true + // Draw checkmark if the value is true + if (this.value) { + ctx.fillStyle = text_color; + ctx.beginPath(); + ctx.moveTo(margin + H * 0.15, currentY + H * 0.5); + ctx.lineTo(margin + H * 0.4, currentY + H * 0.8); + ctx.lineTo(margin + H * 0.85, currentY + H * 0.2); + ctx.stroke(); + } + + }, + get value() { + + return this.inputEl.value === "true"; + }, + set value(x) { + this.inputEl.value = x; + }, + computeSize: function (width) { + return [width, 32]; + }, + mouse: function (event, pos, node) { + // var x = pos[0] - node.pos[0]; + // var y = pos[1] - node.pos[1]; + // var width = node.size[0]; + // var H = LiteGraph.NODE_WIDGET_HEIGHT; + // var margin = 15; + + // if (event.type == LiteGraph.pointerevents_method + "down") { + // if (x > margin && x < widget_width - margin && y > widgetY && y < widgetY + H) { + // this.value = !this.value; // Toggle checkbox value + // shared.inner_value_change(this, this.value, event); + // app.canvas.setDirty(true); + // } + // } + if (event.type === "pointerdown") { + // get widgets of type type : "COLOR" + const widgets = 
node.widgets.filter(w => w.type === "BOOL"); + + for (const w of widgets) { + // color picker + const rect = [w.last_y, w.last_y + 32]; + if (pos[1] > rect[0] && pos[1] < rect[1]) { + // picker.style.position = "absolute"; + // picker.style.left = ( pos[0]) + "px"; + // picker.style.top = ( pos[1]) + "px"; + + // place at screen center + // picker.style.position = "absolute"; + // picker.style.left = (window.innerWidth / 2) + "px"; + // picker.style.top = (window.innerHeight / 2) + "px"; + // picker.style.transform = "translate(-50%, -50%)"; + // picker.style.zIndex = 1000; + console.log("Clicked a BOOL", this.value) + + this.value = this.value ? "false" : "true" + + } + } + } + } + } + + // create a checkbox + widget.inputEl = document.createElement("input") + widget.inputEl.type = "checkbox" + widget.inputEl.value = false + document.body.appendChild(widget.inputEl); + return widget + + }, + COLOR: (key, val, compute = false) => { + /** @type {import("/types/litegraph").IWidget} */ + const widget = {} + widget.y = 0 + widget.name = key; + widget.type = "COLOR"; + widget.options = { default: "#ff0000" }; + widget.value = val || "#ff0000"; + widget.draw = function (ctx, + node, + widgetWidth, + widgetY, + height) { + const hide = this.type !== "COLOR" && app.canvas.ds.scale > 0.5 + if (hide) { + return + } + + const border = 3; + // draw a rect with a border and a fill color + ctx.fillStyle = "#000"; + ctx.fillRect(0, widgetY, widgetWidth, height); + ctx.fillStyle = this.value; + ctx.fillRect(border, widgetY + border, widgetWidth - border * 2, height - border * 2); + // write the input name + // choose the fill based on the luminoisty of this.value color + const color = parseCss(this.value.default || this.value) + if (!color) { + return + } + ctx.fillStyle = shared.isColorBright(color.values, 125) ? 
"#000" : "#fff"; + + + ctx.font = "14px Arial"; + ctx.textAlign = "center"; + ctx.fillText(this.name, widgetWidth * 0.5, widgetY + 14); + + + + // ctx.strokeStyle = "#fff"; + // ctx.strokeRect(border, widgetY + border, widgetWidth - border * 2, height - border * 2); + + + // ctx.fillStyle = "#000"; + // ctx.fillRect(widgetWidth/2 - border / 2 , widgetY + border / 2 , widgetWidth/2 + border / 2, height + border / 2); + // ctx.fillStyle = this.value; + // ctx.fillRect(widgetWidth/2, widgetY, widgetWidth/2, height); + + } + widget.mouse = function (e, pos, node) { + if (e.type === "pointerdown") { + // get widgets of type type : "COLOR" + const widgets = node.widgets.filter(w => w.type === "COLOR"); + + for (const w of widgets) { + // color picker + const rect = [w.last_y, w.last_y + 32]; + if (pos[1] > rect[0] && pos[1] < rect[1]) { + console.log("color picker", node) + const picker = document.createElement("input"); + picker.type = "color"; + picker.value = this.value; + // picker.style.position = "absolute"; + // picker.style.left = ( pos[0]) + "px"; + // picker.style.top = ( pos[1]) + "px"; + + // place at screen center + picker.style.position = "absolute"; + picker.style.left = "999999px"//(window.innerWidth / 2) + "px"; + picker.style.top = "999999px" //(window.innerHeight / 2) + "px"; + // picker.style.transform = "translate(-50%, -50%)"; + // picker.style.zIndex = 1000; + + + + document.body.appendChild(picker); + + picker.addEventListener("change", () => { + this.value = picker.value; + node.graph._version++; + node.setDirtyCanvas(true, true); + picker.remove(); + }); + + picker.click() + + } + } + } + } + widget.computeSize = function (width) { + return [width, 32]; + } + + return widget; + }, + + DEBUG_IMG: (val, index) => { + const w = { + name: `anything_${index}`, + type: "image", + value: val, + draw: function (ctx, + node, + widgetWidth, + widgetY, + height) { + const [cw, ch] = this.computeSize(widgetWidth) + shared.offsetDOMWidget(this, ctx, node, widgetWidth, widgetY, ch) + }, + computeSize: function (width) { + const ratio = this.inputRatio || 1; + if (width) { + return [width, width / ratio + 4] + } + return [128, 128] + }, + onRemove: function () { + if (this.inputEl) { + this.inputEl.remove(); + } + } + } + + w.inputEl = document.createElement("img"); + w.inputEl.src = w.value; + w.inputEl.onload = function () { + w.inputRatio = w.inputEl.naturalWidth / w.inputEl.naturalHeight; + } + document.body.appendChild(w.inputEl); + return w + }, + DEBUG_STRING: (val, index) => { + const w = { + name: `anything_${index}`, + type: "debug_text", + val: val, + draw: function (ctx, + node, + widgetWidth, + widgetY, + height) { + // const [cw, ch] = this.computeSize(widgetWidth) + shared.offsetDOMWidget(this, ctx, node, widgetWidth, widgetY, height) + }, + computeSize: function (width) { + const value = this.inputEl.innerHTML + if (!value) { + return [32, 32] + } + if (!width) { + log(`No width ${this.parent.size}`) + } + + const fontSize = 25; // Assuming 1rem = 16px + + const oldFont = app.ctx.font + app.ctx.font = `${fontSize}px Arial`; + + const words = value.split(" "); + const lines = []; + let currentLine = ""; + for (const word of words) { + const testLine = currentLine.length === 0 ? 
word : `${currentLine} ${word}`; + + const testWidth = app.ctx.measureText(testLine).width; + + // log(`Testing line ${testLine}, width: ${testWidth}, width: ${width}, ratio: ${testWidth / width}`) + if (testWidth > width) { + lines.push(currentLine); + currentLine = word; + } else { + currentLine = testLine; + } + } + app.ctx.font = oldFont; + lines.push(currentLine); + + // Step 3: Calculate the widget width and height + const textHeight = lines.length * (fontSize + 2); // You can adjust the line height (2 in this case) + const maxLineWidth = lines.reduce((maxWidth, line) => Math.max(maxWidth, app.ctx.measureText(line).width), 0); + const widgetWidth = Math.max(width || this.width || 32, maxLineWidth); + const widgetHeight = textHeight + 10; // Additional padding for spacing + return [widgetWidth, widgetHeight + 4] + + }, + onRemove: function () { + if (this.inputEl) { + this.inputEl.remove(); + } + + } + } + w.inputEl = document.createElement("p"); + w.inputEl.style.textAlign = "center"; + w.inputEl.style.fontSize = "1.5em"; + w.inputEl.style.color = "var(--input-text)"; + w.inputEl.style.fontFamily = "monospace"; + w.inputEl.innerHTML = val + document.body.appendChild(w.inputEl); + + return w + } +} + + + +const bboxWidgetDOM = (key, val) => { + /** @type {import("./types/litegraph").IWidget} */ + const widget = { + name: key, + type: "BBOX", + // options: val, + y: 0, + value: val || [0, 0, 0, 0], + + draw: function (ctx, + node, + widgetWidth, + widgetY, + height) { + const hide = this.type !== "BBOX" && app.canvas.ds.scale > 0.5 + this.inputEl.style.display = hide ? "none" : "block"; + if (hide) return; + + shared.offsetDOMWidget(this, ctx, node, widgetWidth, widgetY, height) + }, + computeSize: function (width) { + return [width, 32]; + }, + // onDrawBackground: function (ctx) { + // if (!this.flags.collapsed) return; + // this.inputEl.style.display = "block"; + // this.inputEl.style.top = this.graphcanvas.offsetTop + this.pos[1] + "px"; + // this.inputEl.style.left = this.graphcanvas.offsetLeft + this.pos[0] + "px"; + // }, + onInputChange: function (e) { + const property = e.target.dataset.property; + const bbox = this.getInputData(0); + if (!bbox) return; + bbox[property] = parseFloat(e.target.value); + this.setOutputData(0, bbox); + } + } + widget.inputEl = document.createElement("div") + widget.parent = this + widget.inputEl.innerHTML = ` +
+        <input type="number" class="bbox-input" data-property="x">
+        <input type="number" class="bbox-input" data-property="y">
+        <input type="number" class="bbox-input" data-property="width">
+        <input type="number" class="bbox-input" data-property="height">
+ `; + // set the class document wide if not present + + shared.defineClass("bbox-input", `background-color: var(--comfy-input-bg); + color: var(--input-text); + overflow: hidden; + width:100%; + overflow-y: auto; + padding: 2px; + resize: none; + border: none; + box-sizing: border-box; + font-size: 10px;`) + + + const bboxInputs = widget.inputEl.querySelectorAll(".bbox-input"); + bboxInputs.forEach((input) => { + input.addEventListener("change", widget.onInputChange.bind(this)); + }); + + widget.desc = "Represents a Bounding Box with x, y, width, and height."; + + document.body.appendChild(widget.inputEl); + + + console.log("Bounding Box Widget DOM", widget.inputEl) + return widget + +} +/** + * @returns {import("./types/litegraph").IWidget} widget + */ + + +/** + * @returns {import("./types/litegraph").IWidget} widget + */ + + + +// VIDEO: (node, inputName, inputData, app) => { +// console.log("video") +// const videoWidget = { +// name: "VideoWidget", +// description: "Video Player Widget", +// value: inputData, +// properties: {}, +// widget: null, + +// init: function () { +// this.widget = document.createElement("video"); +// this.widget.width = 200; +// this.widget.height = 120; +// this.widget.controls = true; +// this.widget.style.width = "100%"; +// this.widget.style.height = "100%"; +// this.widget.style.objectFit = "contain"; +// this.widget.style.backgroundColor = "black"; +// this.widget.style.pointerEvents = "none"; +// node.addWidget(inputName, videoWidget.widget, inputData); +// }, + +// setValue: function (value, options) { +// if (value instanceof HTMLVideoElement) { +// this.widget.src = value.src; +// } else if (typeof value === "string") { +// this.widget.src = value; +// } +// }, + +// getValue: function () { +// return this.widget.src; +// }, + +// append: function (parent) { +// parent.appendChild(this.widget); +// }, + +// remove: function () { +// this.widget.parentNode.removeChild(this.widget); +// } +// }; +// return { +// widget: videoWidget, +// } +// } + + + +/** + * @returns {import("./types/comfy").ComfyExtension} extension + */ +const mtb_widgets = { + name: "mtb.widgets", + + init: async () => { + console.log("Registering mtb.widgets") + try { + + const res = await api.fetchApi('/mtb/debug') + const msg = await res.json() + window.MTB_DEBUG = msg.enabled; + } + catch (e) { + console.error('Error:', error); + } + }, + + setup: () => { + app.ui.settings.addSetting({ + id: "mtb.Debug.enabled", + name: "[mtb] Enable Debug (py and js)", + type: "boolean", + defaultValue: false, + + tooltip: + "This will enable debug messages in the console and in the python console respectively", + attrs: { + style: { + fontFamily: "monospace", + }, + }, + async onChange(value) { + if (value) { + console.log("Enabled DEBUG mode") + } + window.MTB_DEBUG = value; + await api.fetchApi('/mtb/debug', { + method: 'POST', + body: JSON.stringify({ + enabled: value + + }) + }).then(response => { }).catch(error => { + console.error('Error:', error); + }); + + }, + }); + }, + + + getCustomWidgets: function () { + return { + BOOL: (node, inputName, inputData, app) => { + console.debug("Registering bool") + + return { + widget: node.addCustomWidget(MtbWidgets.BOOL(inputName, inputData[1]?.default || false)), + minWidth: 150, + minHeight: 30, + }; + }, + + COLOR: (node, inputName, inputData, app) => { + console.debug("Registering color") + return { + widget: node.addCustomWidget(MtbWidgets.COLOR(inputName, inputData[1]?.default || "#ff0000")), + minWidth: 150, + minHeight: 30, + } + }, + 
// BBOX: (node, inputName, inputData, app) => { + // console.debug("Registering bbox") + // return { + // widget: node.addCustomWidget(MtbWidgets.BBOX(inputName, inputData[1]?.default || [0, 0, 0, 0])), + // minWidth: 150, + // minHeight: 30, + // } + + // } + } + }, + /** + * @param {import("./types/comfy").NodeType} nodeType + * @param {import("./types/comfy").NodeDef} nodeData + * @param {import("./types/comfy").App} app + */ + async beforeRegisterNodeDef(nodeType, nodeData, app) { + + const rinputs = nodeData.input?.required; + + let has_custom = false + if (nodeData.input && nodeData.input.required) { + for (const i of Object.keys(nodeData.input.required)) { + const input_type = nodeData.input.required[i][0] + + if (newTypes.includes(input_type)) { + has_custom = true + break; + } + } + } + if (has_custom) { + + //- Add widgets on node creation + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; + this.serialize_widgets = true; + for (const [key, input] of Object.entries(rinputs)) { + switch (input[0]) { + case "COLOR": + // const colW = colorWidget(key, input[1]) + // this.addCustomWidget(colW) + // const associated_input = this.inputs.findIndex((i) => i.widget?.name === key); + // if (associated_input !== -1) { + // this.inputs[associated_input].widget = colW + // } + + + + break; + case "BOOL": + // const widg = boolWidget(key, input[1]) + // this.addCustomWidget(widg) + // this.addWidget("toggle", key, false, function (value, widget, node) { + // console.log(value) + + // }) + //this.removeInput(this.inputs.findIndex((i) => i.widget?.name === key)); + + break; + case "BBOX": + // const bboxW = bboxWidget(key, input[1]) + // this.addCustomWidget(bboxW) + break; + default: + break + } + + + // } + } + + this.setSize?.(this.computeSize()) + + this.onRemoved = function () { + // When removing this node we need to remove the input from the DOM + for (let y in this.widgets) { + if (this.widgets[y].canvas) { + this.widgets[y].canvas.remove(); + } + } + }; + } + + //- Extra menus + const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = origGetExtraMenuOptions ? origGetExtraMenuOptions.apply(this, arguments) : undefined; + if (this.widgets) { + let toInput = []; + let toWidget = []; + for (const w of this.widgets) { + if (w.type === shared.CONVERTED_TYPE) { + //- This is already handled by widgetinputs.js + // toWidget.push({ + // content: `Convert ${w.name} to widget`, + // callback: () => shared.convertToWidget(this, w), + // }); + } else if (newTypes.includes(w.type)) { + const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}]; + + toInput.push({ + content: `Convert ${w.name} to input`, + callback: () => shared.convertToInput(this, w, config), + }); + } + } + if (toInput.length) { + options.push(...toInput, null); + } + + if (toWidget.length) { + options.push(...toWidget, null); + } + } + + return r; + }; + + } + + //- Extending Python Nodes + switch (nodeData.name) { + case "Psd Save (mtb)": { + // const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + // const r = onConnectionsChange ? 
onConnectionsChange.apply(this, arguments) : undefined; + shared.dynamic_connection(this, index, connected) + } + break + } + case "Save Gif (mtb)": { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + const r = onExecuted ? onExecuted.apply(this, message) : undefined; + console.log(message) + if (this.widgets) { + const pos = this.widgets.findIndex((w) => w.name === "anything_0"); + if (pos !== -1) { + for (let i = pos; i < this.widgets.length; i++) { + console.log(this.widgets[i]) + console.log(this) + this.widgets[i].onRemove?.(); + + } + this.widgets.length = pos + + + } + + let imgURLs = [] + if (message && message.gif) { + imgURLs = imgURLs.concat(message.gif.map(params => { + return api.apiURL("/view?" + new URLSearchParams(params).toString()); + })) + console.log(imgURLs) + for (const img of imgURLs) { + const w = this.addCustomWidget(MtbWidgets.DEBUG_IMG(img, 0)) + w.parent = this; + } + } + this.setSize?.(this.computeSize()) + return r + + } + + const onRemoved = nodeType.prototype.onRemoved; + nodeType.prototype.onRemoved = function (message) { + const r = onRemoved ? onRemoved.apply(this, message) : undefined; + if (!this.widgets) return r + for (const w of this.widgets) { + if (w.canvas) { + w.canvas.remove(); + } + w.onRemove?.() + w.onRemoved?.() + } + return r + + } + } + + break + } + case "Animation Builder (mtb)": { + // console.log(nodeType.prototype) + + + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; + + this.changeMode(LiteGraph.ALWAYS) + // api.addEventListener("executed", ({ detail }) => { + + // console.log("executed", detail) + // console.log(this) + + // }) + const raw_iteration = this.widgets.find((w) => w.name === "raw_iteration"); + const raw_loop = this.widgets.find((w) => w.name === "raw_loop"); + + + const total_frames = this.widgets.find((w) => w.name === "total_frames"); + const loop_count = this.widgets.find((w) => w.name === "loop_count"); + + shared.hideWidgetForGood(this, raw_iteration); + shared.hideWidgetForGood(this, raw_loop); + + raw_iteration._value = 0 + // Object.defineProperty(raw_iteration, "value", { + // get() { + // return this._value + // }, + // set(value) { + // this._value = value; + // }, + // }); + + const value_preview = ComfyWidgets["STRING"](this, "PREVIEW_raw_iteration", ["STRING", { multiline: true }], app).widget; + value_preview.inputEl.readOnly = true; + value_preview.inputEl.disabled = true; + + + // value_preview.inputEl.style.opacity = 0.6; + value_preview.inputEl.style.textAlign = "center"; + value_preview.inputEl.style.fontSize = "2.5em"; + value_preview.inputEl.style.backgroundColor = "black"; + + value_preview.inputEl.style.setProperty("--comfy-input-bg", "transparent"); + value_preview.inputEl.style.setProperty("background", "red", "important"); + // remove the comfy-multiline-input class + + // disallow user selection + value_preview.inputEl.style.userSelect = "none"; + + const loop_preview = ComfyWidgets["STRING"](this, "PREVIEW_raw_iteration", ["STRING", { multiline: true }], app).widget; + loop_preview.inputEl.readOnly = true; + loop_preview.inputEl.disabled = true; + + + // loop_preview.inputEl.style.opacity = 0.6; + loop_preview.inputEl.style.textAlign = "center"; + loop_preview.inputEl.style.fontSize = "1.5em"; + loop_preview.inputEl.style.backgroundColor = "black"; + + 
loop_preview.inputEl.style.setProperty("--comfy-input-bg", "transparent"); + loop_preview.inputEl.style.setProperty("background", "red", "important"); + // remove the comfy-multiline-input class + + // disallow user selection + loop_preview.inputEl.style.userSelect = "none"; + + const onReset = () => { + raw_iteration.value = 0; + raw_loop.value = 0; + + value_preview.value = 0; + loop_preview.value = 0; + + app.canvas.setDirty(true); + } + + const reset_button = this.addWidget("button", `Reset`, "reset", onReset); + + const run_button = this.addWidget("button", `Queue`, "queue", () => { + onReset() // this could maybe be a setting or checkbox + app.queuePrompt(0, total_frames.value * loop_count.value) + + }); + + + + raw_iteration.afterQueued = function () { + this.value++; + raw_loop.value = Math.floor(this.value / total_frames.value); + value_preview.value = `raw: ${this.value} +frame: ${this.value % total_frames.value}`; + if (raw_loop.value + 1 > loop_count.value) { + loop_preview.value = `Done 😎!` + return + } + + loop_preview.value = `current loop: ${raw_loop.value + 1}/${loop_count.value}` + + } + + return r + + } + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (data) { + onExecuted?.apply(this, data) + if (this.widgets) { + const pos = this.widgets.findIndex((w) => w.name === "preview"); + if (pos !== -1) { + for (let i = pos; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = pos; + } + } + + const w = ComfyWidgets["STRING"](this, "preview", ["STRING", { multiline: true }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.6; + w.value = data.total_frames; + + // this.onResize?.(this.size); + this.setSize?.(this.computeSize()) + + } + // const onAfterExecuteNode = nodeType.prototype.onAfterExecuteNode; + // nodeType.prototype.onAfterExecuteNode = function () { + // onAfterExecuteNode?.apply(this) + // console.log("after", this) + + // } + console.debug(`Registered ${nodeType.name} node extra events`) + break + + } + default: { + break + + } + + } + // const onNodeCreated = nodeType.prototype.onNodeCreated; + + // nodeType.prototype.onNodeCreated = function () { + // const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; + + // } + + + + + // console.log(nodeData.output) + // if (nodeData.output.includes("VIDEO") && nodeData.output_node) { + // console.log(`Found video output for ${nodeType}`) + // console.log(nodeData) + + // } + + // if (nodeData.name === "Psd Save (mtb)") { + // console.log(`Found psd node`) + // console.log(nodeData) + + // } + + + + } +}; + + +app.registerExtension(mtb_widgets); \ No newline at end of file