diff --git a/README.md b/README.md
index 2a23dfc8541..0465e5ba792 100644
--- a/README.md
+++ b/README.md
@@ -4,38 +4,27 @@
# Invoke - Professional Creative AI Tools for Visual Media
-#### To learn more about Invoke, or implement our Business solutions, visit [invoke.com]
-
[![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry leading web-based UI, and serves as the foundation for multiple commercial products.
-Invoke is available in two editions:
-
-| **Community Edition** | **Professional Edition** |
-|----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|
-| **For users looking for a locally installed, self-hosted and self-managed service** | **For users or teams looking for a cloud-hosted, fully managed service** |
-| - Free to use under a commercially-friendly license | - Monthly subscription fee with three different plan levels |
-| - Download and install on compatible hardware | - Offers additional benefits, including multi-user support, improved model training, and more |
-| - Includes all core studio features: generate, refine, iterate on images, and build workflows | - Hosted in the cloud for easy, secure model access and scalability |
-| Quick Start -> [Installation and Updates][installation docs] | More Information -> [www.invoke.com/pricing](https://www.invoke.com/pricing) |
-
+- Free to use under a commercially-friendly license
+- Download and install on compatible hardware
+- Generate, refine, iterate on images, and build workflows

# Documentation
-| **Quick Links** |
-|----------------------------------------------------------------------------------------------------------------------------|
-| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |
-# Installation
-
-To get started with Invoke, [Download the Installer](https://www.invoke.com/downloads).
+| **Quick Links** |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |
-For detailed step by step instructions, or for instructions on manual/docker installations, visit our documentation on [Installation and Updates][installation docs]
+# Installation
+To get started with Invoke, [Download the Launcher](https://github.com/invoke-ai/launcher/releases/latest).
## Troubleshooting, FAQ and Support
@@ -90,7 +79,6 @@ Original portions of the software are Copyright © 2024 by respective contributo
[features docs]: https://invoke-ai.github.io/InvokeAI/features/database/
[faq]: https://invoke-ai.github.io/InvokeAI/faq/
[contributors]: https://invoke-ai.github.io/InvokeAI/contributing/contributors/
-[invoke.com]: https://www.invoke.com/about
[github issues]: https://github.com/invoke-ai/InvokeAI/issues
[docs home]: https://invoke-ai.github.io/InvokeAI
[installation docs]: https://invoke-ai.github.io/InvokeAI/installation/
diff --git a/invokeai/app/api/routers/app_info.py b/invokeai/app/api/routers/app_info.py
index 5d66c2559ec..d8f3bb2f807 100644
--- a/invokeai/app/api/routers/app_info.py
+++ b/invokeai/app/api/routers/app_info.py
@@ -1,8 +1,5 @@
-import typing
from enum import Enum
from importlib.metadata import distributions
-from pathlib import Path
-from typing import Optional
import torch
from fastapi import Body
@@ -10,7 +7,6 @@
from pydantic import BaseModel, Field
from invokeai.app.api.dependencies import ApiDependencies
-from invokeai.app.invocations.upscale import ESRGAN_MODELS
from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
@@ -27,11 +23,6 @@ class LogLevel(int, Enum):
Critical = logging.CRITICAL
-class Upscaler(BaseModel):
- upscaling_method: str = Field(description="Name of upscaling method")
- upscaling_models: list[str] = Field(description="List of upscaling models for this method")
-
-
app_router = APIRouter(prefix="/v1/app", tags=["app"])
@@ -40,17 +31,6 @@ class AppVersion(BaseModel):
version: str = Field(description="App version")
- highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")
-
-
-class AppConfig(BaseModel):
- """App Config Response"""
-
- infill_methods: list[str] = Field(description="List of available infill methods")
- upscaling_methods: list[Upscaler] = Field(description="List of upscaling methods")
- nsfw_methods: list[str] = Field(description="List of NSFW checking methods")
- watermarking_methods: list[str] = Field(description="List of invisible watermark methods")
-
@app_router.get("/version", operation_id="app_version", status_code=200, response_model=AppVersion)
async def get_version() -> AppVersion:
@@ -72,27 +52,9 @@ async def get_app_deps() -> dict[str, str]:
return sorted_deps
-@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
-async def get_config_() -> AppConfig:
- infill_methods = ["lama", "tile", "cv2", "color"] # TODO: add mosaic back
- if PatchMatch.patchmatch_available():
- infill_methods.append("patchmatch")
-
- upscaling_models = []
- for model in typing.get_args(ESRGAN_MODELS):
- upscaling_models.append(str(Path(model).stem))
- upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models)
-
- nsfw_methods = ["nsfw_checker"]
-
- watermarking_methods = ["invisible_watermark"]
-
- return AppConfig(
- infill_methods=infill_methods,
- upscaling_methods=[upscaler],
- nsfw_methods=nsfw_methods,
- watermarking_methods=watermarking_methods,
- )
+@app_router.get("/patchmatch_status", operation_id="get_patchmatch_status", status_code=200, response_model=bool)
+async def get_patchmatch_status() -> bool:
+ return PatchMatch.patchmatch_available()
class InvokeAIAppConfigWithSetFields(BaseModel):
diff --git a/invokeai/app/api/routers/board_videos.py b/invokeai/app/api/routers/board_videos.py
deleted file mode 100644
index 1db8f2784be..00000000000
--- a/invokeai/app/api/routers/board_videos.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from fastapi import Body, HTTPException
-from fastapi.routing import APIRouter
-
-from invokeai.app.services.videos_common import AddVideosToBoardResult, RemoveVideosFromBoardResult
-
-board_videos_router = APIRouter(prefix="/v1/board_videos", tags=["boards"])
-
-
-@board_videos_router.post(
- "/batch",
- operation_id="add_videos_to_board",
- responses={
- 201: {"description": "Videos were added to board successfully"},
- },
- status_code=201,
- response_model=AddVideosToBoardResult,
-)
-async def add_videos_to_board(
- board_id: str = Body(description="The id of the board to add to"),
- video_ids: list[str] = Body(description="The ids of the videos to add", embed=True),
-) -> AddVideosToBoardResult:
- """Adds a list of videos to a board"""
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@board_videos_router.post(
- "/batch/delete",
- operation_id="remove_videos_from_board",
- responses={
- 201: {"description": "Videos were removed from board successfully"},
- },
- status_code=201,
- response_model=RemoveVideosFromBoardResult,
-)
-async def remove_videos_from_board(
- video_ids: list[str] = Body(description="The ids of the videos to remove", embed=True),
-) -> RemoveVideosFromBoardResult:
- """Removes a list of videos from their board, if they had one"""
- raise HTTPException(status_code=501, detail="Not implemented")
diff --git a/invokeai/app/api/routers/boards.py b/invokeai/app/api/routers/boards.py
index ec3b86bcfa2..cf668d5a1a4 100644
--- a/invokeai/app/api/routers/boards.py
+++ b/invokeai/app/api/routers/boards.py
@@ -33,7 +33,6 @@ class DeleteBoardResult(BaseModel):
)
async def create_board(
board_name: str = Query(description="The name of the board to create", max_length=300),
- is_private: bool = Query(default=False, description="Whether the board is private"),
) -> BoardDTO:
"""Creates a board"""
try:
diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py
index 5320bf18eb3..7b4242e013c 100644
--- a/invokeai/app/api/routers/session_queue.py
+++ b/invokeai/app/api/routers/session_queue.py
@@ -2,7 +2,7 @@
from fastapi import Body, HTTPException, Path, Query
from fastapi.routing import APIRouter
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus
@@ -16,7 +16,6 @@
DeleteAllExceptCurrentResult,
DeleteByDestinationResult,
EnqueueBatchResult,
- FieldIdentifier,
ItemIdsResult,
PruneResult,
RetryItemsResult,
@@ -37,12 +36,6 @@ class SessionQueueAndProcessorStatus(BaseModel):
processor: SessionProcessorStatus
-class ValidationRunData(BaseModel):
- workflow_id: str = Field(description="The id of the workflow being published.")
- input_fields: list[FieldIdentifier] = Body(description="The input fields for the published workflow")
- output_fields: list[FieldIdentifier] = Body(description="The output fields for the published workflow")
-
-
@session_queue_router.post(
"/{queue_id}/enqueue_batch",
operation_id="enqueue_batch",
@@ -54,10 +47,6 @@ async def enqueue_batch(
queue_id: str = Path(description="The queue id to perform this operation on"),
batch: Batch = Body(description="Batch to process"),
prepend: bool = Body(default=False, description="Whether or not to prepend this batch in the queue"),
- validation_run_data: Optional[ValidationRunData] = Body(
- default=None,
- description="The validation run data to use for this batch. This is only used if this is a validation run.",
- ),
) -> EnqueueBatchResult:
"""Processes a batch and enqueues the output graphs for execution."""
try:
diff --git a/invokeai/app/api/routers/videos.py b/invokeai/app/api/routers/videos.py
deleted file mode 100644
index 36ead345c9a..00000000000
--- a/invokeai/app/api/routers/videos.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from typing import Optional
-
-from fastapi import Body, HTTPException, Path, Query
-from fastapi.routing import APIRouter
-
-from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
-from invokeai.app.services.videos_common import (
- DeleteVideosResult,
- StarredVideosResult,
- UnstarredVideosResult,
- VideoDTO,
- VideoIdsResult,
- VideoRecordChanges,
-)
-
-videos_router = APIRouter(prefix="/v1/videos", tags=["videos"])
-
-
-@videos_router.patch(
- "/i/{video_id}",
- operation_id="update_video",
- response_model=VideoDTO,
-)
-async def update_video(
- video_id: str = Path(description="The id of the video to update"),
- video_changes: VideoRecordChanges = Body(description="The changes to apply to the video"),
-) -> VideoDTO:
- """Updates a video"""
-
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.get(
- "/i/{video_id}",
- operation_id="get_video_dto",
- response_model=VideoDTO,
-)
-async def get_video_dto(
- video_id: str = Path(description="The id of the video to get"),
-) -> VideoDTO:
- """Gets a video's DTO"""
-
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.post("/delete", operation_id="delete_videos_from_list", response_model=DeleteVideosResult)
-async def delete_videos_from_list(
- video_ids: list[str] = Body(description="The list of ids of videos to delete", embed=True),
-) -> DeleteVideosResult:
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.post("/star", operation_id="star_videos_in_list", response_model=StarredVideosResult)
-async def star_videos_in_list(
- video_ids: list[str] = Body(description="The list of ids of videos to star", embed=True),
-) -> StarredVideosResult:
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.post("/unstar", operation_id="unstar_videos_in_list", response_model=UnstarredVideosResult)
-async def unstar_videos_in_list(
- video_ids: list[str] = Body(description="The list of ids of videos to unstar", embed=True),
-) -> UnstarredVideosResult:
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.delete("/uncategorized", operation_id="delete_uncategorized_videos", response_model=DeleteVideosResult)
-async def delete_uncategorized_videos() -> DeleteVideosResult:
- """Deletes all videos that are uncategorized"""
-
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.get("/", operation_id="list_video_dtos", response_model=OffsetPaginatedResults[VideoDTO])
-async def list_video_dtos(
- is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
- board_id: Optional[str] = Query(
- default=None,
- description="The board id to filter by. Use 'none' to find videos without a board.",
- ),
- offset: int = Query(default=0, description="The page offset"),
- limit: int = Query(default=10, description="The number of videos per page"),
- order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
- starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
- search_term: Optional[str] = Query(default=None, description="The term to search for"),
-) -> OffsetPaginatedResults[VideoDTO]:
- """Lists video DTOs"""
-
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.get("/ids", operation_id="get_video_ids")
-async def get_video_ids(
- is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
- board_id: Optional[str] = Query(
- default=None,
- description="The board id to filter by. Use 'none' to find videos without a board.",
- ),
- order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
- starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
- search_term: Optional[str] = Query(default=None, description="The term to search for"),
-) -> VideoIdsResult:
- """Gets ordered list of video ids with metadata for optimistic updates"""
-
- raise HTTPException(status_code=501, detail="Not implemented")
-
-
-@videos_router.post(
- "/videos_by_ids",
- operation_id="get_videos_by_ids",
- responses={200: {"model": list[VideoDTO]}},
-)
-async def get_videos_by_ids(
- video_ids: list[str] = Body(embed=True, description="Object containing list of video ids to fetch DTOs for"),
-) -> list[VideoDTO]:
- """Gets video DTOs for the specified video ids. Maintains order of input ids."""
-
- raise HTTPException(status_code=501, detail="Not implemented")
diff --git a/invokeai/app/api/routers/workflows.py b/invokeai/app/api/routers/workflows.py
index 35b928a45af..5a37a75dcf9 100644
--- a/invokeai/app/api/routers/workflows.py
+++ b/invokeai/app/api/routers/workflows.py
@@ -106,7 +106,6 @@ async def list_workflows(
tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"),
query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
- is_published: Optional[bool] = Query(default=None, description="Whether to include/exclude published workflows"),
) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]:
"""Gets a page of workflows"""
workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = []
@@ -119,7 +118,6 @@ async def list_workflows(
categories=categories,
tags=tags,
has_been_opened=has_been_opened,
- is_published=is_published,
)
for workflow in workflows.items:
workflows_with_thumbnails.append(
diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py
index ce1a2193dff..335327f532b 100644
--- a/invokeai/app/api_app.py
+++ b/invokeai/app/api_app.py
@@ -18,7 +18,6 @@
from invokeai.app.api.routers import (
app_info,
board_images,
- board_videos,
boards,
client_state,
download_queue,
@@ -28,7 +27,6 @@
session_queue,
style_presets,
utilities,
- videos,
workflows,
)
from invokeai.app.api.sockets import SocketIO
@@ -127,10 +125,8 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint):
app.include_router(model_manager.model_manager_router, prefix="/api")
app.include_router(download_queue.download_queue_router, prefix="/api")
app.include_router(images.images_router, prefix="/api")
-app.include_router(videos.videos_router, prefix="/api")
app.include_router(boards.boards_router, prefix="/api")
app.include_router(board_images.board_images_router, prefix="/api")
-app.include_router(board_videos.board_videos_router, prefix="/api")
app.include_router(model_relationships.model_relationships_router, prefix="/api")
app.include_router(app_info.app_router, prefix="/api")
app.include_router(session_queue.session_queue_router, prefix="/api")
diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py
index 9e2e982df5a..1bca7ec3f53 100644
--- a/invokeai/app/invocations/fields.py
+++ b/invokeai/app/invocations/fields.py
@@ -235,12 +235,6 @@ class ImageField(BaseModel):
image_name: str = Field(description="The name of the image")
-class VideoField(BaseModel):
- """A video primitive field"""
-
- video_id: str = Field(description="The id of the video")
-
-
class BoardField(BaseModel):
"""A board primitive field"""
@@ -549,27 +543,6 @@ def migrate_model_ui_type(ui_type: UIType | str, json_schema_extra: dict[str, An
ui_model_type = [ModelType.FluxRedux]
case UIType.LlavaOnevisionModel:
ui_model_type = [ModelType.LlavaOnevision]
- case UIType.Imagen3Model:
- ui_model_base = [BaseModelType.Imagen3]
- ui_model_type = [ModelType.Main]
- case UIType.Imagen4Model:
- ui_model_base = [BaseModelType.Imagen4]
- ui_model_type = [ModelType.Main]
- case UIType.ChatGPT4oModel:
- ui_model_base = [BaseModelType.ChatGPT4o]
- ui_model_type = [ModelType.Main]
- case UIType.Gemini2_5Model:
- ui_model_base = [BaseModelType.Gemini2_5]
- ui_model_type = [ModelType.Main]
- case UIType.FluxKontextModel:
- ui_model_base = [BaseModelType.FluxKontext]
- ui_model_type = [ModelType.Main]
- case UIType.Veo3Model:
- ui_model_base = [BaseModelType.Veo3]
- ui_model_type = [ModelType.Video]
- case UIType.RunwayModel:
- ui_model_base = [BaseModelType.Runway]
- ui_model_type = [ModelType.Video]
case _:
pass
diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py
index 1dc470b9705..10703a620cd 100644
--- a/invokeai/app/invocations/primitives.py
+++ b/invokeai/app/invocations/primitives.py
@@ -27,7 +27,6 @@
SD3ConditioningField,
TensorField,
UIComponent,
- VideoField,
)
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -288,30 +287,6 @@ def invoke(self, context: InvocationContext) -> ImageCollectionOutput:
return ImageCollectionOutput(collection=self.collection)
-# endregion
-
-# region Video
-
-
-@invocation_output("video_output")
-class VideoOutput(BaseInvocationOutput):
- """Base class for nodes that output a video"""
-
- video: VideoField = OutputField(description="The output video")
- width: int = OutputField(description="The width of the video in pixels")
- height: int = OutputField(description="The height of the video in pixels")
- duration_seconds: float = OutputField(description="The duration of the video in seconds")
-
- @classmethod
- def build(cls, video_id: str, width: int, height: int, duration_seconds: float) -> "VideoOutput":
- return cls(
- video=VideoField(video_id=video_id),
- width=width,
- height=height,
- duration_seconds=duration_seconds,
- )
-
-
# endregion
# region DenoiseMask
diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py
index 81d05d7f597..5067d42999b 100644
--- a/invokeai/app/services/board_records/board_records_common.py
+++ b/invokeai/app/services/board_records/board_records_common.py
@@ -26,8 +26,6 @@ class BoardRecord(BaseModelExcludeNull):
"""The name of the cover image of the board."""
archived: bool = Field(description="Whether or not the board is archived.")
"""Whether or not the board is archived."""
- is_private: Optional[bool] = Field(default=None, description="Whether the board is private.")
- """Whether the board is private."""
def deserialize_board_record(board_dict: dict) -> BoardRecord:
@@ -42,7 +40,6 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord:
updated_at = board_dict.get("updated_at", get_iso_timestamp())
deleted_at = board_dict.get("deleted_at", get_iso_timestamp())
archived = board_dict.get("archived", False)
- is_private = board_dict.get("is_private", False)
return BoardRecord(
board_id=board_id,
@@ -52,7 +49,6 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord:
updated_at=updated_at,
deleted_at=deleted_at,
archived=archived,
- is_private=is_private,
)
diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py
index d25bb9d9da8..68cd3603287 100644
--- a/invokeai/app/services/boards/boards_common.py
+++ b/invokeai/app/services/boards/boards_common.py
@@ -14,12 +14,10 @@ class BoardDTO(BoardRecord):
"""The number of images in the board."""
asset_count: int = Field(description="The number of assets in the board.")
"""The number of assets in the board."""
- video_count: int = Field(description="The number of videos in the board.")
- """The number of videos in the board."""
def board_record_to_dto(
- board_record: BoardRecord, cover_image_name: Optional[str], image_count: int, asset_count: int, video_count: int
+ board_record: BoardRecord, cover_image_name: Optional[str], image_count: int, asset_count: int
) -> BoardDTO:
"""Converts a board record to a board DTO."""
return BoardDTO(
@@ -27,5 +25,4 @@ def board_record_to_dto(
cover_image_name=cover_image_name,
image_count=image_count,
asset_count=asset_count,
- video_count=video_count,
)
diff --git a/invokeai/app/services/boards/boards_default.py b/invokeai/app/services/boards/boards_default.py
index df161086459..6efeaa1fea8 100644
--- a/invokeai/app/services/boards/boards_default.py
+++ b/invokeai/app/services/boards/boards_default.py
@@ -28,8 +28,7 @@ def get_dto(self, board_id: str) -> BoardDTO:
cover_image_name = None
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id)
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id)
- video_count = 0 # noop for OSS
- return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count)
+ return board_record_to_dto(board_record, cover_image_name, image_count, asset_count)
def update(
self,
@@ -45,8 +44,7 @@ def update(
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id)
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id)
- video_count = 0 # noop for OSS
- return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count)
+ return board_record_to_dto(board_record, cover_image_name, image_count, asset_count)
def delete(self, board_id: str) -> None:
self.__invoker.services.board_records.delete(board_id)
@@ -72,8 +70,7 @@ def get_many(
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id)
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id)
- video_count = 0 # noop for OSS
- board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count))
+ board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count))
return OffsetPaginatedResults[BoardDTO](items=board_dtos, offset=offset, limit=limit, total=len(board_dtos))
@@ -91,7 +88,6 @@ def get_all(
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id)
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id)
- video_count = 0 # noop for OSS
- board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count))
+ board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count))
return board_dtos
diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py
index 2f995293984..a924f2eed9f 100644
--- a/invokeai/app/services/events/events_common.py
+++ b/invokeai/app/services/events/events_common.py
@@ -195,8 +195,6 @@ class InvocationErrorEvent(InvocationEventBase):
error_type: str = Field(description="The error type")
error_message: str = Field(description="The error message")
error_traceback: str = Field(description="The error traceback")
- user_id: Optional[str] = Field(default=None, description="The ID of the user who created the invocation")
- project_id: Optional[str] = Field(default=None, description="The ID of the user who created the invocation")
@classmethod
def build(
@@ -219,8 +217,6 @@ def build(
error_type=error_type,
error_message=error_message,
error_traceback=error_traceback,
- user_id=getattr(queue_item, "user_id", None),
- project_id=getattr(queue_item, "project_id", None),
)
@@ -241,7 +237,6 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
batch_status: BatchStatus = Field(description="The status of the batch")
queue_status: SessionQueueStatus = Field(description="The status of the queue")
session_id: str = Field(description="The ID of the session (aka graph execution state)")
- credits: Optional[float] = Field(default=None, description="The total credits used for this queue item")
@classmethod
def build(
@@ -264,7 +259,6 @@ def build(
completed_at=str(queue_item.completed_at) if queue_item.completed_at else None,
batch_status=batch_status,
queue_status=queue_status,
- credits=queue_item.credits,
)
diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py
index e912753f423..57b512a8558 100644
--- a/invokeai/app/services/session_queue/session_queue_common.py
+++ b/invokeai/app/services/session_queue/session_queue_common.py
@@ -249,15 +249,6 @@ class SessionQueueItem(BaseModel):
retried_from_item_id: Optional[int] = Field(
default=None, description="The item_id of the queue item that this item was retried from"
)
- is_api_validation_run: bool = Field(
- default=False,
- description="Whether this queue item is an API validation run.",
- )
- published_workflow_id: Optional[str] = Field(
- default=None,
- description="The ID of the published workflow associated with this queue item",
- )
- credits: Optional[float] = Field(default=None, description="The total credits used for this queue item")
session: GraphExecutionState = Field(description="The fully-populated session to be executed")
workflow: Optional[WorkflowWithoutID] = Field(
default=None, description="The workflow associated with this queue item"
diff --git a/invokeai/app/services/style_preset_records/style_preset_records_common.py b/invokeai/app/services/style_preset_records/style_preset_records_common.py
index 36153d002d0..9ea0b0219cf 100644
--- a/invokeai/app/services/style_preset_records/style_preset_records_common.py
+++ b/invokeai/app/services/style_preset_records/style_preset_records_common.py
@@ -26,7 +26,6 @@ class PresetData(BaseModel, extra="forbid"):
class PresetType(str, Enum, metaclass=MetaEnum):
User = "user"
Default = "default"
- Project = "project"
class StylePresetChanges(BaseModel, extra="forbid"):
diff --git a/invokeai/app/services/videos_common.py b/invokeai/app/services/videos_common.py
deleted file mode 100644
index a1b8d762287..00000000000
--- a/invokeai/app/services/videos_common.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import datetime
-from typing import Optional, Union
-
-from pydantic import BaseModel, Field, StrictBool, StrictStr
-
-from invokeai.app.util.misc import get_iso_timestamp
-from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
-
-VIDEO_DTO_COLS = ", ".join(
- [
- "videos." + c
- for c in [
- "video_id",
- "width",
- "height",
- "session_id",
- "node_id",
- "is_intermediate",
- "created_at",
- "updated_at",
- "deleted_at",
- "starred",
- ]
- ]
-)
-
-
-class VideoRecord(BaseModelExcludeNull):
- """Deserialized video record without metadata."""
-
- video_id: str = Field(description="The unique id of the video.")
- """The unique id of the video."""
- width: int = Field(description="The width of the video in px.")
- """The actual width of the video in px. This may be different from the width in metadata."""
- height: int = Field(description="The height of the video in px.")
- """The actual height of the video in px. This may be different from the height in metadata."""
- created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the video.")
- """The created timestamp of the video."""
- updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the video.")
- """The updated timestamp of the video."""
- deleted_at: Optional[Union[datetime.datetime, str]] = Field(
- default=None, description="The deleted timestamp of the video."
- )
- """The deleted timestamp of the video."""
- is_intermediate: bool = Field(description="Whether this is an intermediate video.")
- """Whether this is an intermediate video."""
- session_id: Optional[str] = Field(
- default=None,
- description="The session ID that generated this video, if it is a generated video.",
- )
- """The session ID that generated this video, if it is a generated video."""
- node_id: Optional[str] = Field(
- default=None,
- description="The node ID that generated this video, if it is a generated video.",
- )
- """The node ID that generated this video, if it is a generated video."""
- starred: bool = Field(description="Whether this video is starred.")
- """Whether this video is starred."""
-
-
-class VideoRecordChanges(BaseModelExcludeNull):
- """A set of changes to apply to a video record.
-
- Only limited changes are valid:
- - `session_id`: change the session associated with a video
- - `is_intermediate`: change the video's `is_intermediate` flag
- - `starred`: change whether the video is starred
- """
-
- session_id: Optional[StrictStr] = Field(
- default=None,
- description="The video's new session ID.",
- )
- """The video's new session ID."""
- is_intermediate: Optional[StrictBool] = Field(default=None, description="The video's new `is_intermediate` flag.")
- """The video's new `is_intermediate` flag."""
- starred: Optional[StrictBool] = Field(default=None, description="The video's new `starred` state")
- """The video's new `starred` state."""
-
-
-def deserialize_video_record(video_dict: dict) -> VideoRecord:
- """Deserializes a video record."""
-
- # Retrieve all the values, setting "reasonable" defaults if they are not present.
- video_id = video_dict.get("video_id", "unknown")
- width = video_dict.get("width", 0)
- height = video_dict.get("height", 0)
- session_id = video_dict.get("session_id", None)
- node_id = video_dict.get("node_id", None)
- created_at = video_dict.get("created_at", get_iso_timestamp())
- updated_at = video_dict.get("updated_at", get_iso_timestamp())
- deleted_at = video_dict.get("deleted_at", get_iso_timestamp())
- is_intermediate = video_dict.get("is_intermediate", False)
- starred = video_dict.get("starred", False)
-
- return VideoRecord(
- video_id=video_id,
- width=width,
- height=height,
- session_id=session_id,
- node_id=node_id,
- created_at=created_at,
- updated_at=updated_at,
- deleted_at=deleted_at,
- is_intermediate=is_intermediate,
- starred=starred,
- )
-
-
-class VideoCollectionCounts(BaseModel):
- starred_count: int = Field(description="The number of starred videos in the collection.")
- unstarred_count: int = Field(description="The number of unstarred videos in the collection.")
-
-
-class VideoIdsResult(BaseModel):
- """Response containing ordered video ids with metadata for optimistic updates."""
-
- video_ids: list[str] = Field(description="Ordered list of video ids")
- starred_count: int = Field(description="Number of starred videos (when starred_first=True)")
- total_count: int = Field(description="Total number of videos matching the query")
-
-
-class VideoUrlsDTO(BaseModelExcludeNull):
- """The URLs for an image and its thumbnail."""
-
- video_id: str = Field(description="The unique id of the video.")
- """The unique id of the video."""
- video_url: str = Field(description="The URL of the video.")
- """The URL of the video."""
- thumbnail_url: str = Field(description="The URL of the video's thumbnail.")
- """The URL of the video's thumbnail."""
-
-
-class VideoDTO(VideoRecord, VideoUrlsDTO):
- """Deserialized video record, enriched for the frontend."""
-
- board_id: Optional[str] = Field(
- default=None, description="The id of the board the image belongs to, if one exists."
- )
- """The id of the board the image belongs to, if one exists."""
-
-
-def video_record_to_dto(
- video_record: VideoRecord,
- video_url: str,
- thumbnail_url: str,
- board_id: Optional[str],
-) -> VideoDTO:
- """Converts a video record to a video DTO."""
- return VideoDTO(
- **video_record.model_dump(),
- video_url=video_url,
- thumbnail_url=thumbnail_url,
- board_id=board_id,
- )
-
-
-class ResultWithAffectedBoards(BaseModel):
- affected_boards: list[str] = Field(description="The ids of boards affected by the delete operation")
-
-
-class DeleteVideosResult(ResultWithAffectedBoards):
- deleted_videos: list[str] = Field(description="The ids of the videos that were deleted")
-
-
-class StarredVideosResult(ResultWithAffectedBoards):
- starred_videos: list[str] = Field(description="The ids of the videos that were starred")
-
-
-class UnstarredVideosResult(ResultWithAffectedBoards):
- unstarred_videos: list[str] = Field(description="The ids of the videos that were unstarred")
-
-
-class AddVideosToBoardResult(ResultWithAffectedBoards):
- added_videos: list[str] = Field(description="The video ids that were added to the board")
-
-
-class RemoveVideosFromBoardResult(ResultWithAffectedBoards):
- removed_videos: list[str] = Field(description="The video ids that were removed from their board")
diff --git a/invokeai/app/services/workflow_records/workflow_records_base.py b/invokeai/app/services/workflow_records/workflow_records_base.py
index bf91363281b..5bf42ed2533 100644
--- a/invokeai/app/services/workflow_records/workflow_records_base.py
+++ b/invokeai/app/services/workflow_records/workflow_records_base.py
@@ -47,7 +47,6 @@ def get_many(
query: Optional[str],
tags: Optional[list[str]],
has_been_opened: Optional[bool],
- is_published: Optional[bool],
) -> PaginatedResults[WorkflowRecordListItemDTO]:
"""Gets many workflows."""
pass
@@ -57,7 +56,6 @@ def counts_by_category(
self,
categories: list[WorkflowCategory],
has_been_opened: Optional[bool] = None,
- is_published: Optional[bool] = None,
) -> dict[str, int]:
"""Gets a dictionary of counts for each of the provided categories."""
pass
@@ -68,7 +66,6 @@ def counts_by_tag(
tags: list[str],
categories: Optional[list[WorkflowCategory]] = None,
has_been_opened: Optional[bool] = None,
- is_published: Optional[bool] = None,
) -> dict[str, int]:
"""Gets a dictionary of counts for each of the provided tags."""
pass
diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py
index fe203ab8c96..e0cea37468d 100644
--- a/invokeai/app/services/workflow_records/workflow_records_common.py
+++ b/invokeai/app/services/workflow_records/workflow_records_common.py
@@ -31,7 +31,6 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
User = "user"
Default = "default"
- Project = "project"
class WorkflowMeta(BaseModel):
@@ -67,7 +66,6 @@ class WorkflowWithoutID(BaseModel):
# This is typed as optional to prevent errors when pulling workflows from the DB. The frontend adds a default form if
# it is None.
form: dict[str, JsonValue] | None = Field(default=None, description="The form of the workflow.")
- is_published: bool | None = Field(default=None, description="Whether the workflow is published or not.")
model_config = ConfigDict(extra="ignore")
@@ -102,7 +100,6 @@ class WorkflowRecordDTOBase(BaseModel):
opened_at: Optional[Union[datetime.datetime, str]] = Field(
default=None, description="The opened timestamp of the workflow."
)
- is_published: bool | None = Field(default=None, description="Whether the workflow is published or not.")
class WorkflowRecordDTO(WorkflowRecordDTOBase):
diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py
index 72f37469de8..d6a94d156f0 100644
--- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py
+++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py
@@ -104,7 +104,6 @@ def get_many(
query: Optional[str] = None,
tags: Optional[list[str]] = None,
has_been_opened: Optional[bool] = None,
- is_published: Optional[bool] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
with self._db.transaction() as cursor:
# sanitize!
@@ -227,7 +226,6 @@ def counts_by_tag(
tags: list[str],
categories: Optional[list[WorkflowCategory]] = None,
has_been_opened: Optional[bool] = None,
- is_published: Optional[bool] = None,
) -> dict[str, int]:
if not tags:
return {}
@@ -279,7 +277,6 @@ def counts_by_category(
self,
categories: list[WorkflowCategory],
has_been_opened: Optional[bool] = None,
- is_published: Optional[bool] = None,
) -> dict[str, int]:
with self._db.transaction() as cursor:
result: dict[str, int] = {}
diff --git a/invokeai/backend/model_manager/configs/base.py b/invokeai/backend/model_manager/configs/base.py
index 8de9a2b8316..43c31c7e4fd 100644
--- a/invokeai/backend/model_manager/configs/base.py
+++ b/invokeai/backend/model_manager/configs/base.py
@@ -28,6 +28,17 @@
pass
+class URLModelSource(BaseModel):
+ type: Literal[ModelSourceType.Url] = Field(default=ModelSourceType.Url)
+ url: str = Field(
+ description="The URL from which the model was installed.",
+ )
+ api_response: str | None = Field(
+ default=None,
+ description="The original API response from the source, as stringified JSON.",
+ )
+
+
class Config_Base(ABC, BaseModel):
"""
Abstract base class for model configurations. A model config describes a specific combination of model base, type and
@@ -81,10 +92,6 @@ class Config_Base(ABC, BaseModel):
default=None,
description="Url for image to preview model",
)
- usage_info: str | None = Field(
- default=None,
- description="Usage information for this model",
- )
CONFIG_CLASSES: ClassVar[set[Type["Config_Base"]]] = set()
"""Set of all non-abstract subclasses of Config_Base, for use during model probing. In other words, this is the set
diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py
index dcd7c4c0edc..6b8d122d615 100644
--- a/invokeai/backend/model_manager/configs/factory.py
+++ b/invokeai/backend/model_manager/configs/factory.py
@@ -64,15 +64,8 @@
Main_Diffusers_SD3_Config,
Main_Diffusers_SDXL_Config,
Main_Diffusers_SDXLRefiner_Config,
- Main_ExternalAPI_ChatGPT4o_Config,
- Main_ExternalAPI_FluxKontext_Config,
- Main_ExternalAPI_Gemini2_5_Config,
- Main_ExternalAPI_Imagen3_Config,
- Main_ExternalAPI_Imagen4_Config,
Main_GGUF_FLUX_Config,
MainModelDefaultSettings,
- Video_ExternalAPI_Runway_Config,
- Video_ExternalAPI_Veo3_Config,
)
from invokeai.backend.model_manager.configs.siglip import SigLIP_Diffusers_Config
from invokeai.backend.model_manager.configs.spandrel import Spandrel_Checkpoint_Config
@@ -218,15 +211,6 @@
Annotated[SigLIP_Diffusers_Config, SigLIP_Diffusers_Config.get_tag()],
Annotated[FLUXRedux_Checkpoint_Config, FLUXRedux_Checkpoint_Config.get_tag()],
Annotated[LlavaOnevision_Diffusers_Config, LlavaOnevision_Diffusers_Config.get_tag()],
- # Main - external API
- Annotated[Main_ExternalAPI_ChatGPT4o_Config, Main_ExternalAPI_ChatGPT4o_Config.get_tag()],
- Annotated[Main_ExternalAPI_Gemini2_5_Config, Main_ExternalAPI_Gemini2_5_Config.get_tag()],
- Annotated[Main_ExternalAPI_Imagen3_Config, Main_ExternalAPI_Imagen3_Config.get_tag()],
- Annotated[Main_ExternalAPI_Imagen4_Config, Main_ExternalAPI_Imagen4_Config.get_tag()],
- Annotated[Main_ExternalAPI_FluxKontext_Config, Main_ExternalAPI_FluxKontext_Config.get_tag()],
- # Video - external API
- Annotated[Video_ExternalAPI_Veo3_Config, Video_ExternalAPI_Veo3_Config.get_tag()],
- Annotated[Video_ExternalAPI_Runway_Config, Video_ExternalAPI_Runway_Config.get_tag()],
# Unknown model (fallback)
Annotated[Unknown_Config, Unknown_Config.get_tag()],
],
diff --git a/invokeai/backend/model_manager/configs/main.py b/invokeai/backend/model_manager/configs/main.py
index dcb948d99bb..03c44e1a778 100644
--- a/invokeai/backend/model_manager/configs/main.py
+++ b/invokeai/backend/model_manager/configs/main.py
@@ -657,49 +657,3 @@ def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -
**override_fields,
repo_variant=repo_variant,
)
-
-
-class ExternalAPI_Config_Base(ABC, BaseModel):
- """Model config for API-based models."""
-
- format: Literal[ModelFormat.Api] = Field(default=ModelFormat.Api)
-
- @classmethod
- def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
- raise NotAMatchError("External API models cannot be built from disk")
-
-
-class Main_ExternalAPI_ChatGPT4o_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
- base: Literal[BaseModelType.ChatGPT4o] = Field(default=BaseModelType.ChatGPT4o)
-
-
-class Main_ExternalAPI_Gemini2_5_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
- base: Literal[BaseModelType.Gemini2_5] = Field(default=BaseModelType.Gemini2_5)
-
-
-class Main_ExternalAPI_Imagen3_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
- base: Literal[BaseModelType.Imagen3] = Field(default=BaseModelType.Imagen3)
-
-
-class Main_ExternalAPI_Imagen4_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
- base: Literal[BaseModelType.Imagen4] = Field(default=BaseModelType.Imagen4)
-
-
-class Main_ExternalAPI_FluxKontext_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
- base: Literal[BaseModelType.FluxKontext] = Field(default=BaseModelType.FluxKontext)
-
-
-class Video_Config_Base(ABC, BaseModel):
- type: Literal[ModelType.Video] = Field(default=ModelType.Video)
- trigger_phrases: set[str] | None = Field(description="Set of trigger phrases for this model", default=None)
- default_settings: MainModelDefaultSettings | None = Field(
- description="Default settings for this model", default=None
- )
-
-
-class Video_ExternalAPI_Veo3_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base):
- base: Literal[BaseModelType.Veo3] = Field(default=BaseModelType.Veo3)
-
-
-class Video_ExternalAPI_Runway_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base):
- base: Literal[BaseModelType.Runway] = Field(default=BaseModelType.Runway)
diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py
index 99a31f438d1..38afd44fcb6 100644
--- a/invokeai/backend/model_manager/taxonomy.py
+++ b/invokeai/backend/model_manager/taxonomy.py
@@ -48,21 +48,6 @@ class BaseModelType(str, Enum):
"""Indicates the model is associated with FLUX.1 model architecture, including FLUX Dev, Schnell and Fill."""
CogView4 = "cogview4"
"""Indicates the model is associated with CogView 4 model architecture."""
- Imagen3 = "imagen3"
- """Indicates the model is associated with Google Imagen 3 model architecture. This is an external API model."""
- Imagen4 = "imagen4"
- """Indicates the model is associated with Google Imagen 4 model architecture. This is an external API model."""
- Gemini2_5 = "gemini-2.5"
- """Indicates the model is associated with Google Gemini 2.5 Flash Image model architecture. This is an external API model."""
- ChatGPT4o = "chatgpt-4o"
- """Indicates the model is associated with OpenAI ChatGPT 4o Image model architecture. This is an external API model."""
- FluxKontext = "flux-kontext"
- """Indicates the model is associated with FLUX Kontext model architecture. This is an external API model; local FLUX
- Kontext models use the base `Flux`."""
- Veo3 = "veo3"
- """Indicates the model is associated with Google Veo 3 video model architecture. This is an external API model."""
- Runway = "runway"
- """Indicates the model is associated with Runway video model architecture. This is an external API model."""
Unknown = "unknown"
"""Indicates the model's base architecture is unknown."""
@@ -86,7 +71,6 @@ class ModelType(str, Enum):
SigLIP = "siglip"
FluxRedux = "flux_redux"
LlavaOnevision = "llava_onevision"
- Video = "video"
Unknown = "unknown"
@@ -145,7 +129,6 @@ class ModelFormat(str, Enum):
BnbQuantizedLlmInt8b = "bnb_quantized_int8b"
BnbQuantizednf4b = "bnb_quantized_nf4b"
GGUFQuantized = "gguf_quantized"
- Api = "api"
Unknown = "unknown"
diff --git a/invokeai/frontend/web/.storybook/preview.tsx b/invokeai/frontend/web/.storybook/preview.tsx
index 4f1cc0ed9fb..eb3d0391db4 100644
--- a/invokeai/frontend/web/.storybook/preview.tsx
+++ b/invokeai/frontend/web/.storybook/preview.tsx
@@ -10,7 +10,6 @@ import { Provider } from 'react-redux';
// @ts-ignore
import translationEN from '../public/locales/en.json';
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
-import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
import { createStore } from '../src/app/store/store';
import { ReduxInit } from './ReduxInit';
@@ -28,7 +27,6 @@ i18n.use(initReactI18next).init({
const store = createStore();
$store.set(store);
-$baseUrl.set('http://localhost:9090');
const preview: Preview = {
decorators: [
diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json
index e7a533e2ae8..d6e220fd2df 100644
--- a/invokeai/frontend/web/package.json
+++ b/invokeai/frontend/web/package.json
@@ -69,7 +69,6 @@
"linkify-react": "^4.3.1",
"linkifyjs": "^4.3.1",
"lru-cache": "^11.1.0",
- "media-chrome": "^4.13.0",
"mtwist": "^1.0.2",
"nanoid": "^5.1.5",
"nanostores": "^1.0.1",
@@ -88,13 +87,11 @@
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^15.5.3",
"react-icons": "^5.5.0",
- "react-player": "^3.3.1",
"react-redux": "9.2.0",
"react-resizable-panels": "^3.0.3",
"react-textarea-autosize": "^8.5.9",
"react-use": "^17.6.0",
"react-virtuoso": "^4.13.0",
- "redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^5.2.0",
"redux-undo": "^1.1.0",
"rfdc": "^1.4.1",
@@ -151,8 +148,6 @@
"type-fest": "^4.40.0",
"typescript": "^5.8.3",
"vite": "^7.0.5",
- "vite-plugin-css-injected-by-js": "^3.5.2",
- "vite-plugin-dts": "^4.5.3",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^5.1.4",
"vitest": "^3.1.2"
diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml
index e80b7011165..04ba19ebce9 100644
--- a/invokeai/frontend/web/pnpm-lock.yaml
+++ b/invokeai/frontend/web/pnpm-lock.yaml
@@ -98,9 +98,6 @@ importers:
lru-cache:
specifier: ^11.1.0
version: 11.1.0
- media-chrome:
- specifier: ^4.13.0
- version: 4.13.0(react@18.3.1)
mtwist:
specifier: ^1.0.2
version: 1.0.2
@@ -155,9 +152,6 @@ importers:
react-icons:
specifier: ^5.5.0
version: 5.5.0(react@18.3.1)
- react-player:
- specifier: ^3.3.1
- version: 3.3.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
react-redux:
specifier: 9.2.0
version: 9.2.0(@types/react@18.3.23)(react@18.3.1)(redux@5.0.1)
@@ -173,9 +167,6 @@ importers:
react-virtuoso:
specifier: ^4.13.0
version: 4.13.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
- redux-dynamic-middlewares:
- specifier: ^2.2.0
- version: 2.2.0
redux-remember:
specifier: ^5.2.0
version: 5.2.0(redux@5.0.1)
@@ -327,12 +318,6 @@ importers:
vite:
specifier: ^7.0.5
version: 7.0.5(@types/node@22.16.0)(jiti@2.4.2)
- vite-plugin-css-injected-by-js:
- specifier: ^3.5.2
- version: 3.5.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2))
- vite-plugin-dts:
- specifier: ^4.5.3
- version: 4.5.4(@types/node@22.16.0)(rollup@4.45.1)(typescript@5.8.3)(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2))
vite-plugin-eslint:
specifier: ^1.8.1
version: 1.8.1(eslint@9.31.0(jiti@2.4.2))(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2))
@@ -930,44 +915,6 @@ packages:
'@types/react': '>=16'
react: '>=16'
- '@microsoft/api-extractor-model@7.30.6':
- resolution: {integrity: sha512-znmFn69wf/AIrwHya3fxX6uB5etSIn6vg4Q4RB/tb5VDDs1rqREc+AvMC/p19MUN13CZ7+V/8pkYPTj7q8tftg==}
-
- '@microsoft/api-extractor@7.52.8':
- resolution: {integrity: sha512-cszYIcjiNscDoMB1CIKZ3My61+JOhpERGlGr54i6bocvGLrcL/wo9o+RNXMBrb7XgLtKaizZWUpqRduQuHQLdg==}
- hasBin: true
-
- '@microsoft/tsdoc-config@0.17.1':
- resolution: {integrity: sha512-UtjIFe0C6oYgTnad4q1QP4qXwLhe6tIpNTRStJ2RZEPIkqQPREAwE5spzVxsdn9UaEMUqhh0AqSx3X4nWAKXWw==}
-
- '@microsoft/tsdoc@0.15.1':
- resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==}
-
- '@mux/mux-data-google-ima@0.2.8':
- resolution: {integrity: sha512-0ZEkHdcZ6bS8QtcjFcoJeZxJTpX7qRIledf4q1trMWPznugvtajCjCM2kieK/pzkZj1JM6liDRFs1PJSfVUs2A==}
-
- '@mux/mux-player-react@3.5.3':
- resolution: {integrity: sha512-f0McZbIXYDkzecFwhhkf0JgEInPnsOClgBqBhkdhRlLRdrAzMATib+D3Di3rPkRHNH7rc/WWORvSxgJz6m6zkA==}
- peerDependencies:
- '@types/react': ^17.0.0 || ^17.0.0-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0
- '@types/react-dom': '*'
- react: ^17.0.2 || ^17.0.0-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0
- react-dom: ^17.0.2 || ^17.0.2-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0
- peerDependenciesMeta:
- '@types/react':
- optional: true
- '@types/react-dom':
- optional: true
-
- '@mux/mux-player@3.5.3':
- resolution: {integrity: sha512-uXKFXbdtioAi+clSVfD60Rw4r7OvA62u2jV6aar9loW9qMsmKv8LU+8uaIaWQjyAORp6E0S37GOVjo72T6O2eQ==}
-
- '@mux/mux-video@0.26.1':
- resolution: {integrity: sha512-gkMdBAgNlB4+krANZHkQFzYWjWeNsJz69y1/hnPtmNQnpvW+O7oc71OffcZrbblyibSxWMQ6MQpYmBVjXlp6sA==}
-
- '@mux/playback-core@0.30.1':
- resolution: {integrity: sha512-rnO1NE9xHDyzbAkmE6ygJYcD7cyyMt7xXqWTykxlceaoSXLjUqgp42HDio7Lcidto4x/O4FIa7ztjV2aCBCXgQ==}
-
'@nanostores/react@1.0.0':
resolution: {integrity: sha512-eDduyNy+lbQJMg6XxZ/YssQqF6b4OXMFEZMYKPJCCmBevp1lg0g+4ZRi94qGHirMtsNfAWKNwsjOhC+q1gvC+A==}
engines: {node: ^20.0.0 || >=22.0.0}
@@ -1383,28 +1330,6 @@ packages:
'@rtsao/scc@1.1.0':
resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==}
- '@rushstack/node-core-library@5.13.1':
- resolution: {integrity: sha512-5yXhzPFGEkVc9Fu92wsNJ9jlvdwz4RNb2bMso+/+TH0nMm1jDDDsOIf4l8GAkPxGuwPw5DH24RliWVfSPhlW/Q==}
- peerDependencies:
- '@types/node': '*'
- peerDependenciesMeta:
- '@types/node':
- optional: true
-
- '@rushstack/rig-package@0.5.3':
- resolution: {integrity: sha512-olzSSjYrvCNxUFZowevC3uz8gvKr3WTpHQ7BkpjtRpA3wK+T0ybep/SRUMfr195gBzJm5gaXw0ZMgjIyHqJUow==}
-
- '@rushstack/terminal@0.15.3':
- resolution: {integrity: sha512-DGJ0B2Vm69468kZCJkPj3AH5nN+nR9SPmC0rFHtzsS4lBQ7/dgOwtwVxYP7W9JPDMuRBkJ4KHmWKr036eJsj9g==}
- peerDependencies:
- '@types/node': '*'
- peerDependenciesMeta:
- '@types/node':
- optional: true
-
- '@rushstack/ts-command-line@5.0.1':
- resolution: {integrity: sha512-bsbUucn41UXrQK7wgM8CNM/jagBytEyJqXw/umtI8d68vFm1Jwxh1OtLrlW7uGZgjCWiiPH6ooUNa1aVsuVr3Q==}
-
'@socket.io/component-emitter@3.1.2':
resolution: {integrity: sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==}
@@ -1477,9 +1402,6 @@ packages:
typescript:
optional: true
- '@svta/common-media-library@0.12.4':
- resolution: {integrity: sha512-9EuOoaNmz7JrfGwjsrD9SxF9otU5TNMnbLu1yU4BeLK0W5cDxVXXR58Z89q9u2AnHjIctscjMTYdlqQ1gojTuw==}
-
'@swc/core-darwin-arm64@1.12.9':
resolution: {integrity: sha512-GACFEp4nD6V+TZNR2JwbMZRHB+Yyvp14FrcmB6UCUYmhuNWjkxi+CLnEvdbuiKyQYv0zA+TRpCHZ+whEs6gwfA==}
engines: {node: '>=10'}
@@ -1572,9 +1494,6 @@ packages:
'@tybys/wasm-util@0.9.0':
resolution: {integrity: sha512-6+7nlbMVX/PVDCwaIQ8nTOPveOcFLSt8GcXdx8hD0bt39uWxYT88uXzqTd4fTvqta7oeUJqudepapKNt2DYJFw==}
- '@types/argparse@1.0.38':
- resolution: {integrity: sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==}
-
'@types/aria-query@5.0.4':
resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==}
@@ -1734,12 +1653,6 @@ packages:
resolution: {integrity: sha512-YzfhzcTnZVPiLfP/oeKtDp2evwvHLMe0LOy7oe+hb9KKIumLNohYS9Hgp1ifwpu42YWxhZE8yieggz6JpqO/1w==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
- '@vercel/edge@1.2.2':
- resolution: {integrity: sha512-1+y+f6rk0Yc9ss9bRDgz/gdpLimwoRteKHhrcgHvEpjbP1nyT3ByqEMWm2BTcpIO5UtDmIFXc8zdq4LR190PDA==}
-
- '@vimeo/player@2.29.0':
- resolution: {integrity: sha512-9JjvjeqUndb9otCCFd0/+2ESsLk7VkDE6sxOBy9iy2ukezuQbplVRi+g9g59yAurKofbmTi/KcKxBGO/22zWRw==}
-
'@vitejs/plugin-react-swc@3.10.2':
resolution: {integrity: sha512-xD3Rdvrt5LgANug7WekBn1KhcvLn1H3jNBfJRL3reeOIua/WnZOEV5qi5qIBq5T8R0jUDmRtxuvk4bPhzGHDWw==}
peerDependencies:
@@ -1788,35 +1701,6 @@ packages:
'@vitest/utils@3.2.4':
resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==}
- '@volar/language-core@2.4.17':
- resolution: {integrity: sha512-chmRZMbKmcGpKMoO7Reb70uiLrzo0KWC2CkFttKUuKvrE+VYgi+fL9vWMJ07Fv5ulX0V1TAyyacN9q3nc5/ecA==}
-
- '@volar/source-map@2.4.17':
- resolution: {integrity: sha512-QDybtQyO3Ms/NjFqNHTC5tbDN2oK5VH7ZaKrcubtfHBDj63n2pizHC3wlMQ+iT55kQXZUUAbmBX5L1C8CHFeBw==}
-
- '@volar/typescript@2.4.17':
- resolution: {integrity: sha512-3paEFNh4P5DkgNUB2YkTRrfUekN4brAXxd3Ow1syMqdIPtCZHbUy4AW99S5RO/7mzyTWPMdDSo3mqTpB/LPObQ==}
-
- '@vue/compiler-core@3.5.17':
- resolution: {integrity: sha512-Xe+AittLbAyV0pabcN7cP7/BenRBNcteM4aSDCtRvGw0d9OL+HG1u/XHLY/kt1q4fyMeZYXyIYrsHuPSiDPosA==}
-
- '@vue/compiler-dom@3.5.17':
- resolution: {integrity: sha512-+2UgfLKoaNLhgfhV5Ihnk6wB4ljyW1/7wUIog2puUqajiC29Lp5R/IKDdkebh9jTbTogTbsgB+OY9cEWzG95JQ==}
-
- '@vue/compiler-vue2@2.7.16':
- resolution: {integrity: sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==}
-
- '@vue/language-core@2.2.0':
- resolution: {integrity: sha512-O1ZZFaaBGkKbsRfnVH1ifOK1/1BUkyK+3SQsfnh6PmMmD4qJcTU8godCeA96jjDRTL6zgnK7YzCHfaUlH2r0Mw==}
- peerDependencies:
- typescript: '*'
- peerDependenciesMeta:
- typescript:
- optional: true
-
- '@vue/shared@3.5.17':
- resolution: {integrity: sha512-CabR+UN630VnsJO/jHWYBC1YVXyMq94KKp6iF5MQgZJs5I8cmjw6oVMO1oDbtBkENSHSSn/UadWlW/OAgdmKrg==}
-
'@xobotyi/scrollbar-width@1.9.5':
resolution: {integrity: sha512-N8tkAACJx2ww8vFMneJmaAgmjAG1tnVBZJRLRcx061tmsLRZHSEZSLuGWnwPtunsSLvSqXQ2wfp7Mgqg1I+2dQ==}
@@ -1855,34 +1739,9 @@ packages:
resolution: {integrity: sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==}
engines: {node: '>= 14'}
- ajv-draft-04@1.0.0:
- resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==}
- peerDependencies:
- ajv: ^8.5.0
- peerDependenciesMeta:
- ajv:
- optional: true
-
- ajv-formats@3.0.1:
- resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==}
- peerDependencies:
- ajv: ^8.0.0
- peerDependenciesMeta:
- ajv:
- optional: true
-
ajv@6.12.6:
resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==}
- ajv@8.12.0:
- resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==}
-
- ajv@8.13.0:
- resolution: {integrity: sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==}
-
- alien-signals@0.4.14:
- resolution: {integrity: sha512-itUAVzhczTmP2U5yX67xVpsbbOiquusbWVyA9N+sy6+r6YVbFkahXvNCeEPWEOMhwDYwbVbGHFkVL03N9I5g+Q==}
-
ansi-colors@4.1.3:
resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==}
engines: {node: '>=6'}
@@ -1907,9 +1766,6 @@ packages:
resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==}
engines: {node: '>=12'}
- argparse@1.0.10:
- resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==}
-
argparse@2.0.1:
resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
@@ -1992,15 +1848,6 @@ packages:
base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
- bcp-47-match@2.0.3:
- resolution: {integrity: sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ==}
-
- bcp-47-normalize@2.3.0:
- resolution: {integrity: sha512-8I/wfzqQvttUFz7HVJgIZ7+dj3vUaIyIxYXaTRP1YWoSDfzt6TUmxaKZeuXR62qBmYr+nvuWINFRl6pZ5DlN4Q==}
-
- bcp-47@2.1.0:
- resolution: {integrity: sha512-9IIS3UPrvIa1Ej+lVDdDwO7zLehjqsaByECw0bu2RRGP73jALm6FYbzI5gWbgHLvNdkvfXB5YrSbocZdOS0c0w==}
-
better-opn@3.0.2:
resolution: {integrity: sha512-aVNobHnJqLiUelTaHat9DZ1qM2w0C0Eym4LPI/3JxOnSokGVdsl1T1kN7TFvsEAD8G47A6VKQ0TVHqbBnYMJlQ==}
engines: {node: '>=12.0.0'}
@@ -2056,14 +1903,6 @@ packages:
caniuse-lite@1.0.30001727:
resolution: {integrity: sha512-pB68nIHmbN6L/4C6MH1DokyR3bYqFwjaSs/sWDHGj4CTcFtQUQMuJftVwWkXq7mNWOybD3KhUv3oWHoGxgP14Q==}
- castable-video@1.1.10:
- resolution: {integrity: sha512-/T1I0A4VG769wTEZ8gWuy1Crn9saAfRTd1UYTb8xbOPlN78+zOi/1nU2dD5koNkfE5VWvgabkIqrGKmyNXOjSQ==}
-
- ce-la-react@0.3.1:
- resolution: {integrity: sha512-g0YwpZDPIwTwFumGTzNHcgJA6VhFfFCJkSNdUdC04br2UfU+56JDrJrJva3FZ7MToB4NDHAFBiPE/PZdNl1mQA==}
- peerDependencies:
- react: '>=17.0.0'
-
chai@5.2.0:
resolution: {integrity: sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==}
engines: {node: '>=12'}
@@ -2114,18 +1953,12 @@ packages:
resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==}
engines: {node: '>=0.8'}
- cloudflare-video-element@1.3.3:
- resolution: {integrity: sha512-qrHzwLmUhisoIuEoKc7iBbdzBNj2Pi7ThHslU/9U/6PY9DEvo4mh/U+w7OVuzXT9ks7ZXfARvDBfPAaMGF/hIg==}
-
cmdk@1.1.1:
resolution: {integrity: sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==}
peerDependencies:
react: ^18 || ^19 || ^19.0.0-rc
react-dom: ^18 || ^19 || ^19.0.0-rc
- codem-isoboxer@0.3.10:
- resolution: {integrity: sha512-eNk3TRV+xQMJ1PEj0FQGY8KD4m0GPxT487XJ+Iftm7mVa9WpPFDMWqPt+46buiP5j5Wzqe5oMIhqBcAeKfygSA==}
-
color-convert@2.0.1:
resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
engines: {node: '>=7.0.0'}
@@ -2153,12 +1986,6 @@ packages:
engines: {node: '>=18'}
hasBin: true
- confbox@0.1.8:
- resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==}
-
- confbox@0.2.2:
- resolution: {integrity: sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==}
-
convert-source-map@1.9.0:
resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==}
@@ -2195,9 +2022,6 @@ packages:
csstype@3.1.3:
resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==}
- custom-media-element@1.4.5:
- resolution: {integrity: sha512-cjrsQufETwxjvwZbYbKBCJNvmQ2++G9AvT45zDi7NXL9k2PdVcs2h0jQz96J6G4TMKRCcEsoJ+QTgQD00Igtjw==}
-
d3-color@3.1.0:
resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==}
engines: {node: '>=12'}
@@ -2236,12 +2060,6 @@ packages:
resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==}
engines: {node: '>=12'}
- dash-video-element@0.1.6:
- resolution: {integrity: sha512-4gHShaQjcFv6diX5EzB6qAdUGKlIUGGZY8J8yp2pQkWqR0jX4c6plYy0cFraN7mr0DZINe8ujDN1fssDYxJjcg==}
-
- dashjs@5.0.3:
- resolution: {integrity: sha512-TXndNnCUjFjF2nYBxDVba+hWRpVkadkQ8flLp7kHkem+5+wZTfRShJCnVkPUosmjS0YPE9fVNLbYPJxHBeQZvA==}
-
data-view-buffer@1.0.2:
resolution: {integrity: sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==}
engines: {node: '>= 0.4'}
@@ -2254,9 +2072,6 @@ packages:
resolution: {integrity: sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==}
engines: {node: '>= 0.4'}
- de-indent@1.0.2:
- resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==}
-
debug@3.2.7:
resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==}
peerDependencies:
@@ -2371,10 +2186,6 @@ packages:
resolution: {integrity: sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==}
engines: {node: '>=10.0.0'}
- entities@4.5.0:
- resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==}
- engines: {node: '>=0.12'}
-
error-ex@1.3.2:
resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==}
@@ -2579,9 +2390,6 @@ packages:
resolution: {integrity: sha512-/kP8CAwxzLVEeFrMm4kMmy4CCDlpipyA7MYLVrdJIkV0fYF0UaigQHRsxHiuY/GEea+bh4KSv3TIlgr+2UL6bw==}
engines: {node: '>=12.0.0'}
- exsolve@1.0.7:
- resolution: {integrity: sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==}
-
fast-deep-equal@3.1.3:
resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
@@ -2815,22 +2623,9 @@ packages:
resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==}
engines: {node: '>= 0.4'}
- he@1.2.0:
- resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==}
- hasBin: true
-
- hls-video-element@1.5.6:
- resolution: {integrity: sha512-KPdvSR+oBJPiCVb+m6pd2mn3rJEjNbaK8pGhSkxFI2pmyvZIeTVQrPbEO9PT/juwXHwhvCoKJnNxAuFwJG9H5A==}
-
- hls.js@1.6.9:
- resolution: {integrity: sha512-q7qPrri6GRwjcNd7EkFCmhiJ6PBIxeUsdxKbquBkQZpg9jAnp6zSAeN9eEWFlOB09J8JfzAQGoXL5ZEAltjO9g==}
-
hoist-non-react-statics@3.3.2:
resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==}
- html-entities@2.6.0:
- resolution: {integrity: sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ==}
-
html-escaper@2.0.2:
resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}
@@ -2869,9 +2664,6 @@ packages:
resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==}
engines: {node: '>= 4'}
- immediate@3.0.6:
- resolution: {integrity: sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==}
-
immer@10.1.1:
resolution: {integrity: sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==}
@@ -2879,13 +2671,6 @@ packages:
resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==}
engines: {node: '>=6'}
- import-lazy@4.0.0:
- resolution: {integrity: sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==}
- engines: {node: '>=8'}
-
- imsc@1.1.5:
- resolution: {integrity: sha512-V8je+CGkcvGhgl2C1GlhqFFiUOIEdwXbXLiu1Fcubvvbo+g9inauqT3l0pNYXGoLPBj3jxtZz9t+wCopMkwadQ==}
-
imurmurhash@0.1.4:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
@@ -2908,12 +2693,6 @@ packages:
resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==}
engines: {node: '>= 0.4'}
- is-alphabetical@2.0.1:
- resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==}
-
- is-alphanumerical@2.0.1:
- resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==}
-
is-array-buffer@3.0.5:
resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==}
engines: {node: '>= 0.4'}
@@ -2949,9 +2728,6 @@ packages:
resolution: {integrity: sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==}
engines: {node: '>= 0.4'}
- is-decimal@2.0.1:
- resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==}
-
is-docker@2.2.1:
resolution: {integrity: sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==}
engines: {node: '>=8'}
@@ -3074,9 +2850,6 @@ packages:
resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==}
hasBin: true
- jju@1.4.0:
- resolution: {integrity: sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==}
-
js-cookie@2.2.1:
resolution: {integrity: sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ==}
@@ -3146,9 +2919,6 @@ packages:
'@types/node': '>=18'
typescript: '>=5.0.4'
- kolorist@1.8.0:
- resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==}
-
konva@9.3.22:
resolution: {integrity: sha512-yQI5d1bmELlD/fowuyfOp9ff+oamg26WOCkyqUyc+nczD/lhRa3EvD2MZOoc4c1293TAubW9n34fSQLgSeEgSw==}
@@ -3156,9 +2926,6 @@ packages:
resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
engines: {node: '>= 0.8.0'}
- lie@3.1.1:
- resolution: {integrity: sha512-RiNhHysUjhrDQntfYSfY4MU24coXXdEOgw9WGcKHNeEwffDYbF//u87M1EWaMGzuFoSbqW0C9C6lEEhDOAswfw==}
-
lines-and-columns@1.2.4:
resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
@@ -3179,13 +2946,6 @@ packages:
resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
- local-pkg@1.1.1:
- resolution: {integrity: sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg==}
- engines: {node: '>=14'}
-
- localforage@1.10.0:
- resolution: {integrity: sha512-14/H1aX7hzBBmmh7sGPd+AOMkkIrHM3Z1PAyGgZigA1H1p5O5ANnMyWzvpAETtG68/dC4pC0ncy3+PPGzXZHPg==}
-
locate-path@6.0.0:
resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
engines: {node: '>=10'}
@@ -3224,10 +2984,6 @@ packages:
lru-cache@5.1.1:
resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==}
- lru-cache@6.0.0:
- resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==}
- engines: {node: '>=10'}
-
lz-string@1.5.0:
resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==}
hasBin: true
@@ -3252,15 +3008,6 @@ packages:
mdn-data@2.0.14:
resolution: {integrity: sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==}
- media-chrome@4.11.1:
- resolution: {integrity: sha512-+2niDc4qOwlpFAjwxg1OaizK/zKV6y7QqGm4nBFEVlSaG0ZBgOmfc4IXAPiirZqAlZGaFFUaMqCl1SpGU0/naA==}
-
- media-chrome@4.13.0:
- resolution: {integrity: sha512-DfX/Hwxjae/tEHjr1tVnV/6XDFHriMXI1ev8Ji4Z/YwXnqMhNfRtvNsMjefnQK5pkMS/9hC+jmdS+VDWZfsSIw==}
-
- media-tracks@0.3.3:
- resolution: {integrity: sha512-9P2FuUHnZZ3iji+2RQk7Zkh5AmZTnOG5fODACnjhCVveX1McY3jmCRHofIEI+yTBqplz7LXy48c7fQ3Uigp88w==}
-
memoize-one@6.0.0:
resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==}
@@ -3280,9 +3027,6 @@ packages:
resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==}
engines: {node: '>=4'}
- minimatch@3.0.8:
- resolution: {integrity: sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==}
-
minimatch@3.1.2:
resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
@@ -3301,9 +3045,6 @@ packages:
resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==}
engines: {node: '>=16 || 14 >=14.17'}
- mlly@1.7.4:
- resolution: {integrity: sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw==}
-
moo@0.5.2:
resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==}
@@ -3323,15 +3064,6 @@ packages:
mtwist@1.0.2:
resolution: {integrity: sha512-eRsSga5jkLg7nNERPOV8vDNxgSwuEcj5upQfJcT0gXfJwXo3pMc7xOga0fu8rXHyrxzl7GFVWWDuaPQgpKDvgw==}
- muggle-string@0.4.1:
- resolution: {integrity: sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==}
-
- mux-embed@5.11.0:
- resolution: {integrity: sha512-uczzXVraqMRmyYmpGh2zthTmBKvvc5D5yaVKQRgGhFOnF7E4nkhqNkdkQc4C0WTPzdqdPl5OtCelNWMF4tg5RQ==}
-
- mux-embed@5.9.0:
- resolution: {integrity: sha512-wmunL3uoPhma/tWy8PrDPZkvJpXvSFBwbD3KkC4PG8Ztjfb1X3hRJwGUAQyRz7z99b/ovLm2UTTitrkvStjH4w==}
-
nano-css@5.6.2:
resolution: {integrity: sha512-+6bHaC8dSDGALM1HJjOHVXpuastdu2xFoZlC77Jh4cg+33Zcgm+Gxd+1xsnpZK14eyHObSp82+ll5y3SX75liw==}
peerDependencies:
@@ -3352,9 +3084,6 @@ packages:
resolution: {integrity: sha512-kNZ9xnoJYKg/AfxjrVL4SS0fKX++4awQReGqWnwTRHxeHGZ1FJFVgTqr/eMrNQdp0Tz7M7tG/TDaX8QfHDwVCw==}
engines: {node: ^20.0.0 || >=22.0.0}
- native-promise-only@0.8.1:
- resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==}
-
natural-compare@1.4.0:
resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
@@ -3488,9 +3217,6 @@ packages:
resolution: {integrity: sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==}
engines: {node: '>=18'}
- path-browserify@1.0.1:
- resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==}
-
path-exists@4.0.0:
resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
engines: {node: '>=8'}
@@ -3539,15 +3265,6 @@ packages:
resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==}
engines: {node: '>=12'}
- pkg-types@1.3.1:
- resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==}
-
- pkg-types@2.2.0:
- resolution: {integrity: sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ==}
-
- player.style@0.1.9:
- resolution: {integrity: sha512-aFmIhHMrnAP8YliFYFMnRw+5AlHqBvnqWy4vHGo2kFxlC+XjmTXqgg62qSxlE8ubAY83c0ViEZGYglSJi6mGCA==}
-
pluralize@8.0.0:
resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==}
engines: {node: '>=4'}
@@ -3580,9 +3297,6 @@ packages:
resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
engines: {node: '>=6'}
- quansync@0.2.10:
- resolution: {integrity: sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A==}
-
query-string@9.2.2:
resolution: {integrity: sha512-pDSIZJ9sFuOp6VnD+5IkakSVf+rICAuuU88Hcsr6AKL0QtxSIfVuKiVP2oahFI7tk3CRSexwV+Ya6MOoTxzg9g==}
engines: {node: '>=18'}
@@ -3706,13 +3420,6 @@ packages:
react-is@17.0.2:
resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==}
- react-player@3.3.1:
- resolution: {integrity: sha512-wE/xLloneXZ1keelFCaNeIFVNUp4/7YoUjfHjwF945aQzsbDKiIB0LQuCchGL+la0Y1IybxnR0R6Cm3AiqInMw==}
- peerDependencies:
- '@types/react': ^17.0.0 || ^18 || ^19
- react: ^17.0.2 || ^18 || ^19
- react-dom: ^17.0.2 || ^18 || ^19
-
react-redux@9.2.0:
resolution: {integrity: sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==}
peerDependencies:
@@ -3819,9 +3526,6 @@ packages:
resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==}
engines: {node: '>=8'}
- redux-dynamic-middlewares@2.2.0:
- resolution: {integrity: sha512-GHESQC+Y0PV98ZBoaC6br6cDOsNiM1Cu4UleGMqMWCXX03jIr3BoozYVrRkLVVAl4sC216chakMnZOu6SwNdGA==}
-
redux-remember@5.2.0:
resolution: {integrity: sha512-HqXx9V+DKzgBzpiIT5dyiXZgiiSB6zaMs4sIscwQ+Z0zVwUvJh20mqPEQWo4wbthuo5+5jGrS7Yfvv4HyOuAFw==}
peerDependencies:
@@ -3947,9 +3651,6 @@ packages:
resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==}
engines: {node: '>=10'}
- sax@1.2.1:
- resolution: {integrity: sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==}
-
scheduler@0.23.2:
resolution: {integrity: sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==}
@@ -3964,11 +3665,6 @@ packages:
resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==}
hasBin: true
- semver@7.5.4:
- resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==}
- engines: {node: '>=10'}
- hasBin: true
-
semver@7.7.2:
resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==}
engines: {node: '>=10'}
@@ -4072,12 +3768,6 @@ packages:
resolution: {integrity: sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==}
engines: {node: '>=12'}
- spotify-audio-element@1.0.2:
- resolution: {integrity: sha512-YEovyyeJTJMzdSVqFw/Fx19e1gdcD4bmZZ/fWS0Ji58KTpvAT2rophgK87ocqpy6eJNSmIHikhgbRjGWumgZew==}
-
- sprintf-js@1.0.3:
- resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==}
-
stable-hash@0.0.6:
resolution: {integrity: sha512-0afH4mobqTybYZsXImQRLOjHV4gvOW+92HdUIax9t7a8d9v54KWykEuMVIcXhD9BCi+w3kS4x7O6fmZQ3JlG/g==}
@@ -4112,10 +3802,6 @@ packages:
prettier:
optional: true
- string-argv@0.3.2:
- resolution: {integrity: sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==}
- engines: {node: '>=0.6.19'}
-
string-width@4.2.3:
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
engines: {node: '>=8'}
@@ -4183,9 +3869,6 @@ packages:
stylis@4.3.6:
resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==}
- super-media-element@1.4.2:
- resolution: {integrity: sha512-9pP/CVNp4NF2MNlRzLwQkjiTgKKe9WYXrLh9+8QokWmMxz+zt2mf1utkWLco26IuA3AfVcTb//qtlTIjY3VHxA==}
-
supports-color@10.0.0:
resolution: {integrity: sha512-HRVVSbCCMbj7/kdWF9Q+bbckjBHLtHMEoJWlkmYzzdwhYMkjkOwubLM6t7NbWKjgKamGDrWL1++KrjUO1t9oAQ==}
engines: {node: '>=18'}
@@ -4210,9 +3893,6 @@ packages:
resolution: {integrity: sha512-dTEWWNu6JmeVXY0ZYoPuH5cRIwc0MeGbJwah9KUNYSJwommQpCzTySTpEe8Gs1J23aeWEuAobe4Ag7EHVt/LOg==}
engines: {node: '>=10'}
- tiktok-video-element@0.1.0:
- resolution: {integrity: sha512-PVWUlpDdQ/LPXi7x4/furfD7Xh1L72CgkGCaMsZBIjvxucMGm1DDPJdM9IhWBFfo6tuR4cYVO/v596r6GG/lvQ==}
-
tiny-invariant@1.3.3:
resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==}
@@ -4298,9 +3978,6 @@ packages:
tslib@2.8.1:
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
- twitch-video-element@0.1.2:
- resolution: {integrity: sha512-/up4KiWiTYiav+CUo+/DbV8JhP4COwEKSo8h1H/Zft/5NzZ/ZiIQ48h7erFKvwzalN0GfkEGGIfwIzAO0h7FHQ==}
-
type-check@0.4.0:
resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
engines: {node: '>= 0.8.0'}
@@ -4325,23 +4002,11 @@ packages:
resolution: {integrity: sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==}
engines: {node: '>= 0.4'}
- typescript@5.8.2:
- resolution: {integrity: sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==}
- engines: {node: '>=14.17'}
- hasBin: true
-
typescript@5.8.3:
resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==}
engines: {node: '>=14.17'}
hasBin: true
- ua-parser-js@1.0.40:
- resolution: {integrity: sha512-z6PJ8Lml+v3ichVojCiB8toQJBuwR42ySM4ezjXIqXK3M0HczmKQ3LF4rhU55PfD99KEEXQG6yb7iOMyvYuHew==}
- hasBin: true
-
- ufo@1.6.1:
- resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==}
-
unbox-primitive@1.1.0:
resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==}
engines: {node: '>= 0.4'}
@@ -4443,28 +4108,11 @@ packages:
resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==}
hasBin: true
- vimeo-video-element@1.5.3:
- resolution: {integrity: sha512-OQWyGS9nTouMqfRJyvmAm/n6IRhZ7x3EfPAef+Q+inGBeHa3SylDbtyeB/rEBd4B/T/lcYBW3rjaD9W2DRYkiQ==}
-
vite-node@3.2.4:
resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==}
engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0}
hasBin: true
- vite-plugin-css-injected-by-js@3.5.2:
- resolution: {integrity: sha512-2MpU/Y+SCZyWUB6ua3HbJCrgnF0KACAsmzOQt1UvRVJCGF6S8xdA3ZUhWcWdM9ivG4I5az8PnQmwwrkC2CAQrQ==}
- peerDependencies:
- vite: '>2.0.0-0'
-
- vite-plugin-dts@4.5.4:
- resolution: {integrity: sha512-d4sOM8M/8z7vRXHHq/ebbblfaxENjogAAekcfcDCCwAyvGqnPrc7f4NZbvItS+g4WTgerW0xDwSz5qz11JT3vg==}
- peerDependencies:
- typescript: '*'
- vite: '*'
- peerDependenciesMeta:
- vite:
- optional: true
-
vite-plugin-eslint@1.8.1:
resolution: {integrity: sha512-PqdMf3Y2fLO9FsNPmMX+//2BF5SF8nEWspZdgl4kSt7UvHDRHVVfHvxsD7ULYzZrJDGRxR81Nq7TOFgwMnUang==}
peerDependencies:
@@ -4551,9 +4199,6 @@ packages:
resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==}
engines: {node: '>=0.10.0'}
- vscode-uri@3.1.0:
- resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==}
-
walk-up-path@4.0.0:
resolution: {integrity: sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A==}
engines: {node: 20 || >=22}
@@ -4561,10 +4206,6 @@ packages:
wcwidth@1.0.1:
resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==}
- weakmap-polyfill@2.0.4:
- resolution: {integrity: sha512-ZzxBf288iALJseijWelmECm/1x7ZwQn3sMYIkDr2VvZp7r6SEKuT8D0O9Wiq6L9Nl5mazrOMcmiZE/2NCenaxw==}
- engines: {node: '>=8.10.0'}
-
webidl-conversions@3.0.1:
resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}
@@ -4600,9 +4241,6 @@ packages:
engines: {node: '>=8'}
hasBin: true
- wistia-video-element@1.3.3:
- resolution: {integrity: sha512-ZVC8HH8uV3mQGcSz10MACLDalao/0YdVverNN4GNFsOXiumfqSiZnRVc8WZEywgVckBkR7+yerQYESYPDzvTfQ==}
-
word-wrap@1.2.5:
resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
engines: {node: '>=0.10.0'}
@@ -4650,9 +4288,6 @@ packages:
yallist@3.1.1:
resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==}
- yallist@4.0.0:
- resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==}
-
yaml-ast-parser@0.0.43:
resolution: {integrity: sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==}
@@ -4676,9 +4311,6 @@ packages:
resolution: {integrity: sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==}
engines: {node: '>=12.20'}
- youtube-video-element@1.6.1:
- resolution: {integrity: sha512-FDRgXlPxpe1bh6HlhL6GfJVcvVNaZKCcLEZ90X1G3Iu+z2g2cIhm2OWj9abPZq1Zqit6SY7Gwh13H9g7acoBnQ==}
-
zod-validation-error@3.5.3:
resolution: {integrity: sha512-OT5Y8lbUadqVZCsnyFaTQ4/O2mys4tj7PqhdbBCp7McPwvIEKfPtdA6QfPeFQK2/Rz5LgwmAXRJTugBNBi0btw==}
engines: {node: '>=18.0.0'}
@@ -5371,78 +5003,6 @@ snapshots:
'@types/react': 18.3.23
react: 18.3.1
- '@microsoft/api-extractor-model@7.30.6(@types/node@22.16.0)':
- dependencies:
- '@microsoft/tsdoc': 0.15.1
- '@microsoft/tsdoc-config': 0.17.1
- '@rushstack/node-core-library': 5.13.1(@types/node@22.16.0)
- transitivePeerDependencies:
- - '@types/node'
-
- '@microsoft/api-extractor@7.52.8(@types/node@22.16.0)':
- dependencies:
- '@microsoft/api-extractor-model': 7.30.6(@types/node@22.16.0)
- '@microsoft/tsdoc': 0.15.1
- '@microsoft/tsdoc-config': 0.17.1
- '@rushstack/node-core-library': 5.13.1(@types/node@22.16.0)
- '@rushstack/rig-package': 0.5.3
- '@rushstack/terminal': 0.15.3(@types/node@22.16.0)
- '@rushstack/ts-command-line': 5.0.1(@types/node@22.16.0)
- lodash: 4.17.21
- minimatch: 3.0.8
- resolve: 1.22.10
- semver: 7.5.4
- source-map: 0.6.1
- typescript: 5.8.2
- transitivePeerDependencies:
- - '@types/node'
-
- '@microsoft/tsdoc-config@0.17.1':
- dependencies:
- '@microsoft/tsdoc': 0.15.1
- ajv: 8.12.0
- jju: 1.4.0
- resolve: 1.22.10
-
- '@microsoft/tsdoc@0.15.1': {}
-
- '@mux/mux-data-google-ima@0.2.8':
- dependencies:
- mux-embed: 5.9.0
-
- '@mux/mux-player-react@3.5.3(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
- dependencies:
- '@mux/mux-player': 3.5.3(react@18.3.1)
- '@mux/playback-core': 0.30.1
- prop-types: 15.8.1
- react: 18.3.1
- react-dom: 18.3.1(react@18.3.1)
- optionalDependencies:
- '@types/react': 18.3.23
- '@types/react-dom': 18.3.7(@types/react@18.3.23)
-
- '@mux/mux-player@3.5.3(react@18.3.1)':
- dependencies:
- '@mux/mux-video': 0.26.1
- '@mux/playback-core': 0.30.1
- media-chrome: 4.11.1(react@18.3.1)
- player.style: 0.1.9(react@18.3.1)
- transitivePeerDependencies:
- - react
-
- '@mux/mux-video@0.26.1':
- dependencies:
- '@mux/mux-data-google-ima': 0.2.8
- '@mux/playback-core': 0.30.1
- castable-video: 1.1.10
- custom-media-element: 1.4.5
- media-tracks: 0.3.3
-
- '@mux/playback-core@0.30.1':
- dependencies:
- hls.js: 1.6.9
- mux-embed: 5.11.0
-
'@nanostores/react@1.0.0(nanostores@1.0.1)(react@18.3.1)':
dependencies:
nanostores: 1.0.1
@@ -5778,40 +5338,6 @@ snapshots:
'@rtsao/scc@1.1.0': {}
- '@rushstack/node-core-library@5.13.1(@types/node@22.16.0)':
- dependencies:
- ajv: 8.13.0
- ajv-draft-04: 1.0.0(ajv@8.13.0)
- ajv-formats: 3.0.1(ajv@8.13.0)
- fs-extra: 11.3.0
- import-lazy: 4.0.0
- jju: 1.4.0
- resolve: 1.22.10
- semver: 7.5.4
- optionalDependencies:
- '@types/node': 22.16.0
-
- '@rushstack/rig-package@0.5.3':
- dependencies:
- resolve: 1.22.10
- strip-json-comments: 3.1.1
-
- '@rushstack/terminal@0.15.3(@types/node@22.16.0)':
- dependencies:
- '@rushstack/node-core-library': 5.13.1(@types/node@22.16.0)
- supports-color: 8.1.1
- optionalDependencies:
- '@types/node': 22.16.0
-
- '@rushstack/ts-command-line@5.0.1(@types/node@22.16.0)':
- dependencies:
- '@rushstack/terminal': 0.15.3(@types/node@22.16.0)
- '@types/argparse': 1.0.38
- argparse: 1.0.10
- string-argv: 0.3.2
- transitivePeerDependencies:
- - '@types/node'
-
'@socket.io/component-emitter@3.1.2': {}
'@standard-schema/spec@1.0.0': {}
@@ -5893,8 +5419,6 @@ snapshots:
optionalDependencies:
typescript: 5.8.3
- '@svta/common-media-library@0.12.4': {}
-
'@swc/core-darwin-arm64@1.12.9':
optional: true
@@ -5977,8 +5501,6 @@ snapshots:
tslib: 2.8.1
optional: true
- '@types/argparse@1.0.38': {}
-
'@types/aria-query@5.0.4': {}
'@types/babel__core@7.20.5':
@@ -6176,13 +5698,6 @@ snapshots:
'@typescript-eslint/types': 8.37.0
eslint-visitor-keys: 4.2.1
- '@vercel/edge@1.2.2': {}
-
- '@vimeo/player@2.29.0':
- dependencies:
- native-promise-only: 0.8.1
- weakmap-polyfill: 2.0.4
-
'@vitejs/plugin-react-swc@3.10.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2))':
dependencies:
'@rolldown/pluginutils': 1.0.0-beta.11
@@ -6263,51 +5778,6 @@ snapshots:
loupe: 3.1.4
tinyrainbow: 2.0.0
- '@volar/language-core@2.4.17':
- dependencies:
- '@volar/source-map': 2.4.17
-
- '@volar/source-map@2.4.17': {}
-
- '@volar/typescript@2.4.17':
- dependencies:
- '@volar/language-core': 2.4.17
- path-browserify: 1.0.1
- vscode-uri: 3.1.0
-
- '@vue/compiler-core@3.5.17':
- dependencies:
- '@babel/parser': 7.28.0
- '@vue/shared': 3.5.17
- entities: 4.5.0
- estree-walker: 2.0.2
- source-map-js: 1.2.1
-
- '@vue/compiler-dom@3.5.17':
- dependencies:
- '@vue/compiler-core': 3.5.17
- '@vue/shared': 3.5.17
-
- '@vue/compiler-vue2@2.7.16':
- dependencies:
- de-indent: 1.0.2
- he: 1.2.0
-
- '@vue/language-core@2.2.0(typescript@5.8.3)':
- dependencies:
- '@volar/language-core': 2.4.17
- '@vue/compiler-dom': 3.5.17
- '@vue/compiler-vue2': 2.7.16
- '@vue/shared': 3.5.17
- alien-signals: 0.4.14
- minimatch: 9.0.5
- muggle-string: 0.4.1
- path-browserify: 1.0.1
- optionalDependencies:
- typescript: 5.8.3
-
- '@vue/shared@3.5.17': {}
-
'@xobotyi/scrollbar-width@1.9.5': {}
'@xyflow/react@12.8.2(@types/react@18.3.23)(immer@10.1.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
@@ -6354,14 +5824,6 @@ snapshots:
agent-base@7.1.3: {}
- ajv-draft-04@1.0.0(ajv@8.13.0):
- optionalDependencies:
- ajv: 8.13.0
-
- ajv-formats@3.0.1(ajv@8.13.0):
- optionalDependencies:
- ajv: 8.13.0
-
ajv@6.12.6:
dependencies:
fast-deep-equal: 3.1.3
@@ -6369,22 +5831,6 @@ snapshots:
json-schema-traverse: 0.4.1
uri-js: 4.4.1
- ajv@8.12.0:
- dependencies:
- fast-deep-equal: 3.1.3
- json-schema-traverse: 1.0.0
- require-from-string: 2.0.2
- uri-js: 4.4.1
-
- ajv@8.13.0:
- dependencies:
- fast-deep-equal: 3.1.3
- json-schema-traverse: 1.0.0
- require-from-string: 2.0.2
- uri-js: 4.4.1
-
- alien-signals@0.4.14: {}
-
ansi-colors@4.1.3: {}
ansi-regex@5.0.1: {}
@@ -6399,10 +5845,6 @@ snapshots:
ansi-styles@6.2.1: {}
- argparse@1.0.10:
- dependencies:
- sprintf-js: 1.0.3
-
argparse@2.0.1: {}
aria-hidden@1.2.6:
@@ -6516,19 +5958,6 @@ snapshots:
base64-js@1.5.1: {}
- bcp-47-match@2.0.3: {}
-
- bcp-47-normalize@2.3.0:
- dependencies:
- bcp-47: 2.1.0
- bcp-47-match: 2.0.3
-
- bcp-47@2.1.0:
- dependencies:
- is-alphabetical: 2.0.1
- is-alphanumerical: 2.0.1
- is-decimal: 2.0.1
-
better-opn@3.0.2:
dependencies:
open: 8.4.2
@@ -6591,14 +6020,6 @@ snapshots:
caniuse-lite@1.0.30001727: {}
- castable-video@1.1.10:
- dependencies:
- custom-media-element: 1.4.5
-
- ce-la-react@0.3.1(react@18.3.1):
- dependencies:
- react: 18.3.1
-
chai@5.2.0:
dependencies:
assertion-error: 2.0.1
@@ -6656,8 +6077,6 @@ snapshots:
clone@1.0.4: {}
- cloudflare-video-element@1.3.3: {}
-
cmdk@1.1.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.23)(react@18.3.1)
@@ -6670,8 +6089,6 @@ snapshots:
- '@types/react'
- '@types/react-dom'
- codem-isoboxer@0.3.10: {}
-
color-convert@2.0.1:
dependencies:
color-name: 1.1.4
@@ -6698,10 +6115,6 @@ snapshots:
tree-kill: 1.2.2
yargs: 17.7.2
- confbox@0.1.8: {}
-
- confbox@0.2.2: {}
-
convert-source-map@1.9.0: {}
convert-source-map@2.0.0: {}
@@ -6747,8 +6160,6 @@ snapshots:
csstype@3.1.3: {}
- custom-media-element@1.4.5: {}
-
d3-color@3.1.0: {}
d3-dispatch@3.0.1: {}
@@ -6785,24 +6196,6 @@ snapshots:
d3-selection: 3.0.0
d3-transition: 3.0.1(d3-selection@3.0.0)
- dash-video-element@0.1.6:
- dependencies:
- custom-media-element: 1.4.5
- dashjs: 5.0.3
-
- dashjs@5.0.3:
- dependencies:
- '@svta/common-media-library': 0.12.4
- bcp-47-match: 2.0.3
- bcp-47-normalize: 2.3.0
- codem-isoboxer: 0.3.10
- fast-deep-equal: 3.1.3
- html-entities: 2.6.0
- imsc: 1.1.5
- localforage: 1.10.0
- path-browserify: 1.0.1
- ua-parser-js: 1.0.40
-
data-view-buffer@1.0.2:
dependencies:
call-bound: 1.0.4
@@ -6821,8 +6214,6 @@ snapshots:
es-errors: 1.3.0
is-data-view: 1.0.2
- de-indent@1.0.2: {}
-
debug@3.2.7:
dependencies:
ms: 2.1.3
@@ -6929,8 +6320,6 @@ snapshots:
engine.io-parser@5.2.3: {}
- entities@4.5.0: {}
-
error-ex@1.3.2:
dependencies:
is-arrayish: 0.2.1
@@ -7276,8 +6665,6 @@ snapshots:
expect-type@1.2.1: {}
- exsolve@1.0.7: {}
-
fast-deep-equal@3.1.3: {}
fast-glob@3.3.3:
@@ -7501,22 +6888,10 @@ snapshots:
dependencies:
function-bind: 1.1.2
- he@1.2.0: {}
-
- hls-video-element@1.5.6:
- dependencies:
- custom-media-element: 1.4.5
- hls.js: 1.6.9
- media-tracks: 0.3.3
-
- hls.js@1.6.9: {}
-
hoist-non-react-statics@3.3.2:
dependencies:
react-is: 16.13.1
- html-entities@2.6.0: {}
-
html-escaper@2.0.2: {}
html-parse-stringify@3.0.1:
@@ -7552,8 +6927,6 @@ snapshots:
ignore@7.0.5: {}
- immediate@3.0.6: {}
-
immer@10.1.1: {}
import-fresh@3.3.1:
@@ -7561,12 +6934,6 @@ snapshots:
parent-module: 1.0.1
resolve-from: 4.0.0
- import-lazy@4.0.0: {}
-
- imsc@1.1.5:
- dependencies:
- sax: 1.2.1
-
imurmurhash@0.1.4: {}
indent-string@4.0.0: {}
@@ -7585,13 +6952,6 @@ snapshots:
hasown: 2.0.2
side-channel: 1.1.0
- is-alphabetical@2.0.1: {}
-
- is-alphanumerical@2.0.1:
- dependencies:
- is-alphabetical: 2.0.1
- is-decimal: 2.0.1
-
is-array-buffer@3.0.5:
dependencies:
call-bind: 1.0.8
@@ -7634,8 +6994,6 @@ snapshots:
call-bound: 1.0.4
has-tostringtag: 1.0.2
- is-decimal@2.0.1: {}
-
is-docker@2.2.1: {}
is-extglob@2.1.1: {}
@@ -7757,8 +7115,6 @@ snapshots:
jiti@2.4.2: {}
- jju@1.4.0: {}
-
js-cookie@2.2.1: {}
js-levenshtein@1.1.6: {}
@@ -7828,8 +7184,6 @@ snapshots:
zod: 3.25.76
zod-validation-error: 3.5.3(zod@3.25.76)
- kolorist@1.8.0: {}
-
konva@9.3.22: {}
levn@0.4.1:
@@ -7837,10 +7191,6 @@ snapshots:
prelude-ls: 1.2.1
type-check: 0.4.0
- lie@3.1.1:
- dependencies:
- immediate: 3.0.6
-
lines-and-columns@1.2.4: {}
linkify-react@4.3.1(linkifyjs@4.3.1)(react@18.3.1):
@@ -7857,16 +7207,6 @@ snapshots:
load-tsconfig@0.2.5: {}
- local-pkg@1.1.1:
- dependencies:
- mlly: 1.7.4
- pkg-types: 2.2.0
- quansync: 0.2.10
-
- localforage@1.10.0:
- dependencies:
- lie: 3.1.1
-
locate-path@6.0.0:
dependencies:
p-locate: 5.0.0
@@ -7900,10 +7240,6 @@ snapshots:
dependencies:
yallist: 3.1.1
- lru-cache@6.0.0:
- dependencies:
- yallist: 4.0.0
-
lz-string@1.5.0: {}
magic-string@0.30.17:
@@ -7926,21 +7262,6 @@ snapshots:
mdn-data@2.0.14: {}
- media-chrome@4.11.1(react@18.3.1):
- dependencies:
- '@vercel/edge': 1.2.2
- ce-la-react: 0.3.1(react@18.3.1)
- transitivePeerDependencies:
- - react
-
- media-chrome@4.13.0(react@18.3.1):
- dependencies:
- ce-la-react: 0.3.1(react@18.3.1)
- transitivePeerDependencies:
- - react
-
- media-tracks@0.3.3: {}
-
memoize-one@6.0.0: {}
merge2@1.4.1: {}
@@ -7954,10 +7275,6 @@ snapshots:
min-indent@1.0.1: {}
- minimatch@3.0.8:
- dependencies:
- brace-expansion: 1.1.12
-
minimatch@3.1.2:
dependencies:
brace-expansion: 1.1.12
@@ -7974,13 +7291,6 @@ snapshots:
minipass@7.1.2: {}
- mlly@1.7.4:
- dependencies:
- acorn: 8.15.0
- pathe: 2.0.3
- pkg-types: 1.3.1
- ufo: 1.6.1
-
moo@0.5.2: {}
motion-dom@11.18.1:
@@ -7995,12 +7305,6 @@ snapshots:
mtwist@1.0.2: {}
- muggle-string@0.4.1: {}
-
- mux-embed@5.11.0: {}
-
- mux-embed@5.9.0: {}
-
nano-css@5.6.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@jridgewell/sourcemap-codec': 1.5.4
@@ -8020,8 +7324,6 @@ snapshots:
nanostores@1.0.1: {}
- native-promise-only@0.8.1: {}
-
natural-compare@1.4.0: {}
nearley@2.20.1:
@@ -8197,8 +7499,6 @@ snapshots:
index-to-position: 1.1.0
type-fest: 4.41.0
- path-browserify@1.0.1: {}
-
path-exists@4.0.0: {}
path-exists@5.0.0: {}
@@ -8228,24 +7528,6 @@ snapshots:
picomatch@4.0.3: {}
- pkg-types@1.3.1:
- dependencies:
- confbox: 0.1.8
- mlly: 1.7.4
- pathe: 2.0.3
-
- pkg-types@2.2.0:
- dependencies:
- confbox: 0.2.2
- exsolve: 1.0.7
- pathe: 2.0.3
-
- player.style@0.1.9(react@18.3.1):
- dependencies:
- media-chrome: 4.11.1(react@18.3.1)
- transitivePeerDependencies:
- - react
-
pluralize@8.0.0: {}
possible-typed-array-names@1.1.0: {}
@@ -8274,8 +7556,6 @@ snapshots:
punycode@2.3.1: {}
- quansync@0.2.10: {}
-
query-string@9.2.2:
dependencies:
decode-uri-component: 0.4.1
@@ -8393,24 +7673,6 @@ snapshots:
react-is@17.0.2: {}
- react-player@3.3.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
- dependencies:
- '@mux/mux-player-react': 3.5.3(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
- '@types/react': 18.3.23
- cloudflare-video-element: 1.3.3
- dash-video-element: 0.1.6
- hls-video-element: 1.5.6
- react: 18.3.1
- react-dom: 18.3.1(react@18.3.1)
- spotify-audio-element: 1.0.2
- tiktok-video-element: 0.1.0
- twitch-video-element: 0.1.2
- vimeo-video-element: 1.5.3
- wistia-video-element: 1.3.3
- youtube-video-element: 1.6.1
- transitivePeerDependencies:
- - '@types/react-dom'
-
react-redux@9.2.0(@types/react@18.3.23)(react@18.3.1)(redux@5.0.1):
dependencies:
'@types/use-sync-external-store': 0.0.6
@@ -8556,8 +7818,6 @@ snapshots:
indent-string: 4.0.0
strip-indent: 3.0.0
- redux-dynamic-middlewares@2.2.0: {}
-
redux-remember@5.2.0(redux@5.0.1):
dependencies:
redux: 5.0.1
@@ -8705,8 +7965,6 @@ snapshots:
safe-stable-stringify@2.5.0: {}
- sax@1.2.1: {}
-
scheduler@0.23.2:
dependencies:
loose-envify: 1.4.0
@@ -8717,10 +7975,6 @@ snapshots:
semver@6.3.1: {}
- semver@7.5.4:
- dependencies:
- lru-cache: 6.0.0
-
semver@7.7.2: {}
serialize-error@12.0.0:
@@ -8831,10 +8085,6 @@ snapshots:
split-on-first@3.0.0: {}
- spotify-audio-element@1.0.2: {}
-
- sprintf-js@1.0.3: {}
-
stable-hash@0.0.6: {}
stack-generator@2.0.10:
@@ -8884,8 +8134,6 @@ snapshots:
- supports-color
- utf-8-validate
- string-argv@0.3.2: {}
-
string-width@4.2.3:
dependencies:
emoji-regex: 8.0.0
@@ -8976,8 +8224,6 @@ snapshots:
stylis@4.3.6: {}
- super-media-element@1.4.2: {}
-
supports-color@10.0.0: {}
supports-color@7.2.0:
@@ -8998,8 +8244,6 @@ snapshots:
throttle-debounce@3.0.1: {}
- tiktok-video-element@0.1.0: {}
-
tiny-invariant@1.3.3: {}
tinybench@2.9.0: {}
@@ -9062,8 +8306,6 @@ snapshots:
tslib@2.8.1: {}
- twitch-video-element@0.1.2: {}
-
type-check@0.4.0:
dependencies:
prelude-ls: 1.2.1
@@ -9103,14 +8345,8 @@ snapshots:
possible-typed-array-names: 1.1.0
reflect.getprototypeof: 1.0.10
- typescript@5.8.2: {}
-
typescript@5.8.3: {}
- ua-parser-js@1.0.40: {}
-
- ufo@1.6.1: {}
-
unbox-primitive@1.1.0:
dependencies:
call-bound: 1.0.4
@@ -9191,10 +8427,6 @@ snapshots:
uuid@11.1.0: {}
- vimeo-video-element@1.5.3:
- dependencies:
- '@vimeo/player': 2.29.0
-
vite-node@3.2.4(@types/node@22.16.0)(jiti@2.4.2):
dependencies:
cac: 6.7.14
@@ -9216,29 +8448,6 @@ snapshots:
- tsx
- yaml
- vite-plugin-css-injected-by-js@3.5.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)):
- dependencies:
- vite: 7.0.5(@types/node@22.16.0)(jiti@2.4.2)
-
- vite-plugin-dts@4.5.4(@types/node@22.16.0)(rollup@4.45.1)(typescript@5.8.3)(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)):
- dependencies:
- '@microsoft/api-extractor': 7.52.8(@types/node@22.16.0)
- '@rollup/pluginutils': 5.2.0(rollup@4.45.1)
- '@volar/typescript': 2.4.17
- '@vue/language-core': 2.2.0(typescript@5.8.3)
- compare-versions: 6.1.1
- debug: 4.4.1(supports-color@10.0.0)
- kolorist: 1.8.0
- local-pkg: 1.1.1
- magic-string: 0.30.17
- typescript: 5.8.3
- optionalDependencies:
- vite: 7.0.5(@types/node@22.16.0)(jiti@2.4.2)
- transitivePeerDependencies:
- - '@types/node'
- - rollup
- - supports-color
-
vite-plugin-eslint@1.8.1(eslint@9.31.0(jiti@2.4.2))(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)):
dependencies:
'@rollup/pluginutils': 4.2.1
@@ -9315,16 +8524,12 @@ snapshots:
void-elements@3.1.0: {}
- vscode-uri@3.1.0: {}
-
walk-up-path@4.0.0: {}
wcwidth@1.0.1:
dependencies:
defaults: 1.0.4
- weakmap-polyfill@2.0.4: {}
-
webidl-conversions@3.0.1: {}
webpack-virtual-modules@0.6.2: {}
@@ -9384,10 +8589,6 @@ snapshots:
siginfo: 2.0.0
stackback: 0.0.2
- wistia-video-element@1.3.3:
- dependencies:
- super-media-element: 1.4.2
-
word-wrap@1.2.5: {}
wrap-ansi@7.0.0:
@@ -9412,8 +8613,6 @@ snapshots:
yallist@3.1.1: {}
- yallist@4.0.0: {}
-
yaml-ast-parser@0.0.43: {}
yaml@1.10.2: {}
@@ -9434,8 +8633,6 @@ snapshots:
yocto-queue@1.2.1: {}
- youtube-video-element@1.6.1: {}
-
zod-validation-error@3.5.3(zod@3.25.76):
dependencies:
zod: 3.25.76
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index aacd5c728f1..0343e1af19b 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -43,8 +43,6 @@
"move": "Move",
"movingImagesToBoard_one": "Moving {{count}} image to board:",
"movingImagesToBoard_other": "Moving {{count}} images to board:",
- "movingVideosToBoard_one": "Moving {{count}} video to board:",
- "movingVideosToBoard_other": "Moving {{count}} videos to board:",
"myBoard": "My Board",
"noBoards": "No {{boardType}} Boards",
"noMatching": "No matching Boards",
@@ -61,8 +59,6 @@
"imagesWithCount_other": "{{count}} images",
"assetsWithCount_one": "{{count}} asset",
"assetsWithCount_other": "{{count}} assets",
- "videosWithCount_one": "{{count}} video",
- "videosWithCount_other": "{{count}} videos",
"updateBoardError": "Error updating board"
},
"accordions": {
@@ -375,9 +371,6 @@
"deleteImage_one": "Delete Image",
"deleteImage_other": "Delete {{count}} Images",
"deleteImagePermanent": "Deleted images cannot be restored.",
- "deleteVideo_one": "Delete Video",
- "deleteVideo_other": "Delete {{count}} Videos",
- "deleteVideoPermanent": "Deleted videos cannot be restored.",
"displayBoardSearch": "Board Search",
"displaySearch": "Image Search",
"download": "Download",
@@ -397,7 +390,6 @@
"sortDirection": "Sort Direction",
"showStarredImagesFirst": "Show Starred Images First",
"noImageSelected": "No Image Selected",
- "noVideoSelected": "No Video Selected",
"noImagesInGallery": "No Images to Display",
"starImage": "Star",
"unstarImage": "Unstar",
@@ -429,9 +421,7 @@
"openViewer": "Open Viewer",
"closeViewer": "Close Viewer",
"move": "Move",
- "useForPromptGeneration": "Use for Prompt Generation",
- "videos": "Videos",
- "videosTab": "Videos you've created and saved within Invoke."
+ "useForPromptGeneration": "Use for Prompt Generation"
},
"hotkeys": {
"hotkeys": "Hotkeys",
@@ -476,10 +466,6 @@
"title": "Select the Queue Tab",
"desc": "Selects the Queue tab."
},
- "selectVideoTab": {
- "title": "Select the Video Tab",
- "desc": "Selects the Video tab."
- },
"focusPrompt": {
"title": "Focus Prompt",
"desc": "Move cursor focus to the positive prompt."
@@ -514,9 +500,6 @@
"key": "1"
}
},
- "video": {
- "title": "Video"
- },
"canvas": {
"title": "Canvas",
"selectBrushTool": {
@@ -823,13 +806,11 @@
"guidance": "Guidance",
"height": "Height",
"imageDetails": "Image Details",
- "videoDetails": "Video Details",
"imageDimensions": "Image Dimensions",
"metadata": "Metadata",
"model": "Model",
"negativePrompt": "Negative Prompt",
"noImageDetails": "No image details found",
- "noVideoDetails": "No video details found",
"noMetaData": "No metadata found",
"noRecallParameters": "No parameters to recall found",
"parameterSet": "Parameter {{parameter}} set",
@@ -847,11 +828,7 @@
"vae": "VAE",
"width": "Width",
"workflow": "Workflow",
- "canvasV2Metadata": "Canvas Layers",
- "videoModel": "Model",
- "videoDuration": "Duration",
- "videoAspectRatio": "Aspect Ratio",
- "videoResolution": "Resolution"
+ "canvasV2Metadata": "Canvas Layers"
},
"modelManager": {
"active": "active",
@@ -1269,13 +1246,9 @@
"images": "Images",
"images_withCount_one": "Image",
"images_withCount_other": "Images",
- "videos_withCount_one": "Video",
- "videos_withCount_other": "Videos",
"infillMethod": "Infill Method",
"infillColorValue": "Fill Color",
"info": "Info",
- "startingFrameImage": "Start Frame",
- "startingFrameImageAspectRatioWarning": "Image aspect ratio does not match the video aspect ratio ({{videoAspectRatio}}). This could lead to unexpected cropping during video generation.",
"invoke": {
"addingImagesTo": "Adding images to",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.",
@@ -1322,8 +1295,7 @@
"noNodesInGraph": "No nodes in graph",
"systemDisconnected": "System disconnected",
"promptExpansionPending": "Prompt expansion in progress",
- "promptExpansionResultPending": "Please accept or discard your prompt expansion result",
- "videoIsDisabled": "Video generation is not enabled for {{accountType}} accounts."
+ "promptExpansionResultPending": "Please accept or discard your prompt expansion result"
},
"maskBlur": "Mask Blur",
"negativePromptPlaceholder": "Negative Prompt",
@@ -1341,11 +1313,9 @@
"seamlessXAxis": "Seamless X Axis",
"seamlessYAxis": "Seamless Y Axis",
"seed": "Seed",
- "videoActions": "Video Actions",
"imageActions": "Image Actions",
"sendToCanvas": "Send To Canvas",
"sendToUpscale": "Send To Upscale",
- "sendToVideo": "Send To Video",
"showOptionsPanel": "Show Side Panel (O or T)",
"shuffle": "Shuffle Seed",
"steps": "Steps",
@@ -1357,7 +1327,6 @@
"postProcessing": "Post-Processing (Shift + U)",
"processImage": "Process Image",
"upscaling": "Upscaling",
- "video": "Video",
"useAll": "Use All",
"useSize": "Use Size",
"useCpuNoise": "Use CPU Noise",
@@ -2643,12 +2612,7 @@
"viewModeTooltip": "This is how your prompt will look with your currently selected template. To edit your prompt, click anywhere in the text box.",
"togglePromptPreviews": "Toggle Prompt Previews"
},
- "upsell": {
- "inviteTeammates": "Invite Teammates",
- "professional": "Professional",
- "professionalUpsell": "Available in Invoke's Professional Edition. Click here or visit invoke.com/pricing for more details.",
- "shareAccess": "Share Access"
- },
+
"ui": {
"tabs": {
"generate": "Generate",
@@ -2660,30 +2624,19 @@
"queue": "Queue",
"upscaling": "Upscaling",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
- "video": "Video",
"gallery": "Gallery"
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Workflow Editor",
"imageViewer": "Viewer",
- "canvas": "Canvas",
- "video": "Video"
+ "canvas": "Canvas"
},
"launchpad": {
"workflowsTitle": "Go deep with Workflows.",
"upscalingTitle": "Upscale and add detail.",
"canvasTitle": "Edit and refine on Canvas.",
"generateTitle": "Generate images from text prompts.",
- "videoTitle": "Generate videos from text prompts.",
- "video": {
- "startingFrameCalloutTitle": "Add a Starting Frame",
- "startingFrameCalloutDesc": "Add an image to control the first frame of your video."
- },
- "addStartingFrame": {
- "title": "Add a Starting Frame",
- "description": "Add an image to control the first frame of your video."
- },
"modelGuideText": "Want to learn what prompts work best for each model?",
"modelGuideLink": "Check out our Model Guide.",
"createNewWorkflowFromScratch": "Create a new Workflow from scratch",
@@ -2758,10 +2711,6 @@
}
}
},
- "video": {
- "noVideoSelected": "No video selected",
- "selectFromGallery": "Select a video from the gallery to play"
- },
"system": {
"enableLogging": "Enable Logging",
"logLevel": {
diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx
index 0f23e05a71f..1c1af39a000 100644
--- a/invokeai/frontend/web/src/app/components/App.tsx
+++ b/invokeai/frontend/web/src/app/components/App.tsx
@@ -1,41 +1,28 @@
import { Box } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator';
import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator';
-import { $didStudioInit, type StudioInitAction } from 'app/hooks/useStudioInitAction';
import { clearStorage } from 'app/store/enhancers/reduxRemember/driver';
-import type { PartialAppConfig } from 'app/types/invokeai';
-import Loading from 'common/components/Loading/Loading';
import { AppContent } from 'features/ui/components/AppContent';
-import { memo, useCallback } from 'react';
+import { memo } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import ThemeLocaleProvider from './ThemeLocaleProvider';
-const DEFAULT_CONFIG = {};
-interface Props {
- config?: PartialAppConfig;
- studioInitAction?: StudioInitAction;
-}
-
-const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
- const didStudioInit = useStore($didStudioInit);
-
- const handleReset = useCallback(() => {
- clearStorage();
- location.reload();
- return false;
- }, []);
+const errorBoundaryOnReset = () => {
+ clearStorage();
+ location.reload();
+ return false;
+};
+const App = () => {
return (
-
+
- {!didStudioInit && }
-
+
diff --git a/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx b/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx
index f061ba15f9b..f22a94c33fc 100644
--- a/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx
+++ b/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx
@@ -1,8 +1,5 @@
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
-import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store/storeHooks';
import { useClipboard } from 'common/hooks/useClipboard';
-import { selectConfigSlice } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import newGithubIssueUrl from 'new-github-issue-url';
import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg';
@@ -16,11 +13,8 @@ type Props = {
resetErrorBoundary: () => void;
};
-const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
-
const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
const { t } = useTranslation();
- const isLocal = useAppSelector(selectIsLocal);
const clipboard = useClipboard();
const handleCopy = useCallback(() => {
@@ -34,17 +28,13 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
}, [clipboard, error, t]);
const url = useMemo(() => {
- if (isLocal) {
- return newGithubIssueUrl({
- user: 'invoke-ai',
- repo: 'InvokeAI',
- template: 'BUG_REPORT.yml',
- title: `[bug]: ${error.name}: ${error.message}`,
- });
- } else {
- return 'https://support.invoke.ai/support/tickets/new';
- }
- }, [error.message, error.name, isLocal]);
+ return newGithubIssueUrl({
+ user: 'invoke-ai',
+ repo: 'InvokeAI',
+ template: 'BUG_REPORT.yml',
+ title: `[bug]: ${error.name}: ${error.message}`,
+ });
+ }, [error.message, error.name]);
return (
@@ -75,9 +65,7 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
{t('common.copyError')}
- }>
- {isLocal ? t('accessibility.createIssue') : t('accessibility.submitSupportTicket')}
-
+ }>{t('accessibility.createIssue')}
diff --git a/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx b/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx
index 0a21348e984..77e8412daa7 100644
--- a/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx
+++ b/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx
@@ -1,14 +1,10 @@
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
import { setupListeners } from '@reduxjs/toolkit/query';
-import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
-import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
+import { useSyncFaviconQueueStatus } from 'app/hooks/useSyncFaviconQueueStatus';
import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection';
-import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
-import { useLogger } from 'app/logging/useLogger';
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import type { PartialAppConfig } from 'app/types/invokeai';
import { useFocusRegionWatcher } from 'common/hooks/focus';
import { useCloseChakraTooltipsOnDragFix } from 'common/hooks/useCloseChakraTooltipsOnDragFix';
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
@@ -19,7 +15,6 @@ import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/w
import { useSyncExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { useSyncNodeErrors } from 'features/nodes/store/util/fieldValidators';
import { useReadinessWatcher } from 'features/queue/store/readiness';
-import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { useNavigationApi } from 'features/ui/layouts/use-navigation-api';
import i18n from 'i18n';
@@ -34,55 +29,46 @@ const queueCountArg = { destination: 'canvas' };
* GlobalHookIsolator is a logical component that runs global hooks in an isolated component, so that they do not
* cause needless re-renders of any other components.
*/
-export const GlobalHookIsolator = memo(
- ({ config, studioInitAction }: { config: PartialAppConfig; studioInitAction?: StudioInitAction }) => {
- const language = useAppSelector(selectLanguage);
- const logger = useLogger('system');
- const dispatch = useAppDispatch();
+export const GlobalHookIsolator = memo(() => {
+ const language = useAppSelector(selectLanguage);
+ const dispatch = useAppDispatch();
- // singleton!
- useReadinessWatcher();
- useSocketIO();
- useGlobalModifiersInit();
- useGlobalHotkeys();
- useGetOpenAPISchemaQuery();
- useSyncLoggingConfig();
- useCloseChakraTooltipsOnDragFix();
- useNavigationApi();
- useDndMonitor();
- useSyncNodeErrors();
- useSyncLangDirection();
+ // singleton!
+ useReadinessWatcher();
+ useSocketIO();
+ useGlobalModifiersInit();
+ useGlobalHotkeys();
+ useGetOpenAPISchemaQuery();
+ useSyncLoggingConfig();
+ useCloseChakraTooltipsOnDragFix();
+ useNavigationApi();
+ useDndMonitor();
+ useSyncNodeErrors();
+ useSyncLangDirection();
- // Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
- // and/or in progress canvas sessions.
- useGetQueueCountsByDestinationQuery(queueCountArg);
- useSyncExecutionState();
+ // Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
+ // and/or in progress canvas sessions.
+ useGetQueueCountsByDestinationQuery(queueCountArg);
+ useSyncExecutionState();
- useEffect(() => {
- i18n.changeLanguage(language);
- }, [language]);
+ useEffect(() => {
+ i18n.changeLanguage(language);
+ }, [language]);
- useEffect(() => {
- logger.info({ config }, 'Received config');
- dispatch(configChanged(config));
- }, [dispatch, config, logger]);
+ useEffect(() => {
+ dispatch(appStarted());
+ }, [dispatch]);
- useEffect(() => {
- dispatch(appStarted());
- }, [dispatch]);
+ useEffect(() => {
+ return setupListeners(dispatch);
+ }, [dispatch]);
- useEffect(() => {
- return setupListeners(dispatch);
- }, [dispatch]);
+ useStarterModelsToast();
+ useSyncFaviconQueueStatus();
+ useFocusRegionWatcher();
+ useWorkflowBuilderWatcher();
+ useDynamicPromptsWatcher();
- useStudioInitAction(studioInitAction);
- useStarterModelsToast();
- useSyncQueueStatus();
- useFocusRegionWatcher();
- useWorkflowBuilderWatcher();
- useDynamicPromptsWatcher();
-
- return null;
- }
-);
+ return null;
+});
GlobalHookIsolator.displayName = 'GlobalHookIsolator';
diff --git a/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx b/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx
index c86faa50bbf..dd1595bdd74 100644
--- a/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx
+++ b/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx
@@ -16,7 +16,7 @@ import type { ImageDTO } from 'services/api/types';
export const GlobalImageHotkeys = memo(() => {
useAssertSingleton('GlobalImageHotkeys');
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const imageDTO = useImageDTO(lastSelectedItem?.type === 'image' ? lastSelectedItem.id : null);
+ const imageDTO = useImageDTO(lastSelectedItem ?? null);
if (!imageDTO) {
return null;
diff --git a/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx b/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx
index b5aec1dd561..5c1446662ef 100644
--- a/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx
+++ b/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx
@@ -4,13 +4,10 @@ import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteM
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { CropImageModal } from 'features/cropper/components/CropImageModal';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
-import { DeleteVideoModal } from 'features/deleteVideoModal/components/DeleteVideoModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
import { ImageContextMenu } from 'features/gallery/components/ContextMenu/ImageContextMenu';
-import { VideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu';
-import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal';
import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
@@ -34,7 +31,6 @@ export const GlobalModalIsolator = memo(() => {
return (
<>
-
@@ -46,12 +42,10 @@ export const GlobalModalIsolator = memo(() => {
-
-
diff --git a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx
index 21eee66513f..775a4c7a963 100644
--- a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx
+++ b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx
@@ -1,309 +1,33 @@
import 'i18n';
-import type { InvokeAIUIProps } from 'app/components/types';
-import { $didStudioInit } from 'app/hooks/useStudioInitAction';
-import { $loggingOverrides, configureLogging } from 'app/logging/logger';
+import { configureLogging } from 'app/logging/logger';
import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver';
-import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
-import { $accountTypeText } from 'app/store/nanostores/accountTypeText';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { $baseUrl } from 'app/store/nanostores/baseUrl';
-import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
-import { $customStarUI } from 'app/store/nanostores/customStarUI';
-import { $isDebugging } from 'app/store/nanostores/isDebugging';
-import { $logo } from 'app/store/nanostores/logo';
-import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
-import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
-import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/projectId';
-import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
-import { $toastMap } from 'app/store/nanostores/toastMap';
-import { $videoUpsellComponent } from 'app/store/nanostores/videoUpsellComponent';
-import { $whatsNew } from 'app/store/nanostores/whatsNew';
import { createStore } from 'app/store/store';
import Loading from 'common/components/Loading/Loading';
-import {
- $workflowLibraryCategoriesOptions,
- $workflowLibrarySortOptions,
- $workflowLibraryTagCategoriesOptions,
- DEFAULT_WORKFLOW_LIBRARY_CATEGORIES,
- DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS,
- DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES,
-} from 'features/nodes/store/workflowLibrarySlice';
-import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react';
+import React, { lazy, memo, useEffect, useState } from 'react';
import { Provider } from 'react-redux';
-import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
-import { $socketOptions } from 'services/events/stores';
+
+/*
+ * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
+ * possible opportunity.
+ *
+ * Once redux initializes, we will check the user's settings and update the logging config accordingly. See
+ * `useSyncLoggingConfig`.
+ */
+configureLogging(true, 'debug', '*');
const App = lazy(() => import('./App'));
-const InvokeAIUI = ({
- apiUrl,
- openAPISchemaUrl,
- token,
- config,
- customNavComponent,
- accountSettingsLink,
- middleware,
- projectId,
- projectName,
- projectUrl,
- queueId,
- studioInitAction,
- customStarUi,
- socketOptions,
- isDebugging = false,
- logo,
- toastMap,
- accountTypeText,
- videoUpsellComponent,
- workflowCategories,
- workflowTagCategories,
- workflowSortOptions,
- loggingOverrides,
- onClickGoToModelManager,
- whatsNew,
- storagePersistDebounce = 300,
-}: InvokeAIUIProps) => {
+const InvokeAIUI = () => {
const [store, setStore] = useState | undefined>(undefined);
const [didRehydrate, setDidRehydrate] = useState(false);
- useLayoutEffect(() => {
- /*
- * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
- * possible opportunity.
- *
- * Once redux initializes, we will check the user's settings and update the logging config accordingly. See
- * `useSyncLoggingConfig`.
- */
- $loggingOverrides.set(loggingOverrides);
-
- // Until we get the user's settings, we will use the overrides OR default values.
- configureLogging(
- loggingOverrides?.logIsEnabled ?? true,
- loggingOverrides?.logLevel ?? 'debug',
- loggingOverrides?.logNamespaces ?? '*'
- );
- }, [loggingOverrides]);
-
- useLayoutEffect(() => {
- if (studioInitAction) {
- $didStudioInit.set(false);
- }
- }, [studioInitAction]);
-
- useEffect(() => {
- // configure API client token
- if (token) {
- $authToken.set(token);
- }
-
- // configure API client base url
- if (apiUrl) {
- $baseUrl.set(apiUrl);
- }
-
- // configure API client project header
- if (projectId) {
- $projectId.set(projectId);
- }
-
- // configure API client project header
- if (queueId) {
- $queueId.set(queueId);
- }
-
- // reset dynamically added middlewares
- resetMiddlewares();
-
- // TODO: at this point, after resetting the middleware, we really ought to clean up the socket
- // stuff by calling `dispatch(socketReset())`. but we cannot dispatch from here as we are
- // outside the provider. it's not needed until there is the possibility that we will change
- // the `apiUrl`/`token` dynamically.
-
- // rebuild socket middleware with token and apiUrl
- if (middleware && middleware.length > 0) {
- addMiddleware(...middleware);
- }
-
- return () => {
- // Reset the API client token and base url on unmount
- $baseUrl.set(undefined);
- $authToken.set(undefined);
- $projectId.set(undefined);
- $queueId.set(DEFAULT_QUEUE_ID);
- };
- }, [apiUrl, token, middleware, projectId, queueId, projectName, projectUrl]);
-
- useEffect(() => {
- if (customStarUi) {
- $customStarUI.set(customStarUi);
- }
-
- return () => {
- $customStarUI.set(undefined);
- };
- }, [customStarUi]);
-
- useEffect(() => {
- if (accountTypeText) {
- $accountTypeText.set(accountTypeText);
- }
-
- return () => {
- $accountTypeText.set('');
- };
- }, [accountTypeText]);
-
- useEffect(() => {
- if (videoUpsellComponent) {
- $videoUpsellComponent.set(videoUpsellComponent);
- }
-
- return () => {
- $videoUpsellComponent.set(undefined);
- };
- }, [videoUpsellComponent]);
-
- useEffect(() => {
- if (customNavComponent) {
- $customNavComponent.set(customNavComponent);
- }
-
- return () => {
- $customNavComponent.set(undefined);
- };
- }, [customNavComponent]);
-
- useEffect(() => {
- if (accountSettingsLink) {
- $accountSettingsLink.set(accountSettingsLink);
- }
-
- return () => {
- $accountSettingsLink.set(undefined);
- };
- }, [accountSettingsLink]);
-
- useEffect(() => {
- if (openAPISchemaUrl) {
- $openAPISchemaUrl.set(openAPISchemaUrl);
- }
-
- return () => {
- $openAPISchemaUrl.set(undefined);
- };
- }, [openAPISchemaUrl]);
-
- useEffect(() => {
- $projectName.set(projectName);
-
- return () => {
- $projectName.set(undefined);
- };
- }, [projectName]);
-
- useEffect(() => {
- $projectUrl.set(projectUrl);
-
- return () => {
- $projectUrl.set(undefined);
- };
- }, [projectUrl]);
-
- useEffect(() => {
- if (logo) {
- $logo.set(logo);
- }
-
- return () => {
- $logo.set(undefined);
- };
- }, [logo]);
-
- useEffect(() => {
- if (toastMap) {
- $toastMap.set(toastMap);
- }
-
- return () => {
- $toastMap.set(undefined);
- };
- }, [toastMap]);
-
- useEffect(() => {
- if (whatsNew) {
- $whatsNew.set(whatsNew);
- }
-
- return () => {
- $whatsNew.set(undefined);
- };
- }, [whatsNew]);
-
- useEffect(() => {
- if (onClickGoToModelManager) {
- $onClickGoToModelManager.set(onClickGoToModelManager);
- }
-
- return () => {
- $onClickGoToModelManager.set(undefined);
- };
- }, [onClickGoToModelManager]);
-
- useEffect(() => {
- if (workflowCategories) {
- $workflowLibraryCategoriesOptions.set(workflowCategories);
- }
-
- return () => {
- $workflowLibraryCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_CATEGORIES);
- };
- }, [workflowCategories]);
-
- useEffect(() => {
- if (workflowTagCategories) {
- $workflowLibraryTagCategoriesOptions.set(workflowTagCategories);
- }
-
- return () => {
- $workflowLibraryTagCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES);
- };
- }, [workflowTagCategories]);
-
- useEffect(() => {
- if (workflowSortOptions) {
- $workflowLibrarySortOptions.set(workflowSortOptions);
- }
-
- return () => {
- $workflowLibrarySortOptions.set(DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS);
- };
- }, [workflowSortOptions]);
-
- useEffect(() => {
- if (socketOptions) {
- $socketOptions.set(socketOptions);
- }
- return () => {
- $socketOptions.set({});
- };
- }, [socketOptions]);
-
- useEffect(() => {
- if (isDebugging) {
- $isDebugging.set(isDebugging);
- }
- return () => {
- $isDebugging.set(false);
- };
- }, [isDebugging]);
-
useEffect(() => {
const onRehydrated = () => {
setDidRehydrate(true);
};
- const store = createStore({ persist: true, persistDebounce: storagePersistDebounce, onRehydrated });
+ const store = createStore({ persist: true, persistDebounce: 300, onRehydrated });
setStore(store);
$store.set(store);
if (import.meta.env.MODE === 'development') {
@@ -318,7 +42,7 @@ const InvokeAIUI = ({
window.$store = undefined;
}
};
- }, [storagePersistDebounce]);
+ }, []);
if (!store || !didRehydrate) {
return ;
@@ -328,7 +52,7 @@ const InvokeAIUI = ({
}>
-
+
diff --git a/invokeai/frontend/web/src/app/components/types.ts b/invokeai/frontend/web/src/app/components/types.ts
deleted file mode 100644
index dbec6a72a86..00000000000
--- a/invokeai/frontend/web/src/app/components/types.ts
+++ /dev/null
@@ -1,43 +0,0 @@
-import type { Middleware } from '@reduxjs/toolkit';
-import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
-import type { LoggingOverrides } from 'app/logging/logger';
-import type { CustomStarUi } from 'app/store/nanostores/customStarUI';
-import type { PartialAppConfig } from 'app/types/invokeai';
-import type { SocketOptions } from 'dgram';
-import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
-import type { WorkflowCategory } from 'features/nodes/types/workflow';
-import type { ToastConfig } from 'features/toast/toast';
-import type { PropsWithChildren, ReactNode } from 'react';
-import type { ManagerOptions } from 'socket.io-client';
-
-export interface InvokeAIUIProps extends PropsWithChildren {
- apiUrl?: string;
- openAPISchemaUrl?: string;
- token?: string;
- config?: PartialAppConfig;
- customNavComponent?: ReactNode;
- accountSettingsLink?: string;
- middleware?: Middleware[];
- projectId?: string;
- projectName?: string;
- projectUrl?: string;
- queueId?: string;
- studioInitAction?: StudioInitAction;
- customStarUi?: CustomStarUi;
- socketOptions?: Partial;
- isDebugging?: boolean;
- logo?: ReactNode;
- toastMap?: Record;
- accountTypeText?: string;
- videoUpsellComponent?: ReactNode;
- whatsNew?: ReactNode[];
- workflowCategories?: WorkflowCategory[];
- workflowTagCategories?: WorkflowTagCategory[];
- workflowSortOptions?: WorkflowSortOption[];
- loggingOverrides?: LoggingOverrides;
- /**
- * If provided, overrides in-app navigation to the model manager
- */
- onClickGoToModelManager?: () => void;
- storagePersistDebounce?: number;
-}
diff --git a/invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts b/invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts
deleted file mode 100644
index 80c76e31484..00000000000
--- a/invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts
+++ /dev/null
@@ -1,262 +0,0 @@
-import { useStore } from '@nanostores/react';
-import { useAppStore } from 'app/store/storeHooks';
-import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
-import { withResultAsync } from 'common/util/result';
-import { canvasReset } from 'features/controlLayers/store/actions';
-import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
-import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
-import { imageDTOToImageObject } from 'features/controlLayers/store/util';
-import { sentImageToCanvas } from 'features/gallery/store/actions';
-import { MetadataUtils } from 'features/metadata/parsing';
-import { $hasTemplates } from 'features/nodes/store/nodesSlice';
-import { $isWorkflowLibraryModalOpen } from 'features/nodes/store/workflowLibraryModal';
-import {
- $workflowLibraryTagOptions,
- workflowLibraryTagsReset,
- workflowLibraryTagToggled,
- workflowLibraryViewChanged,
-} from 'features/nodes/store/workflowLibrarySlice';
-import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
-import { toast } from 'features/toast/toast';
-import { navigationApi } from 'features/ui/layouts/navigation-api';
-import { LAUNCHPAD_PANEL_ID, WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared';
-import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
-import { atom } from 'nanostores';
-import { useCallback, useEffect } from 'react';
-import { useTranslation } from 'react-i18next';
-import { getImageDTO, getImageMetadata } from 'services/api/endpoints/images';
-import { getStylePreset } from 'services/api/endpoints/stylePresets';
-
-type _StudioInitAction = { type: T; data: U };
-
-type LoadWorkflowAction = _StudioInitAction<'loadWorkflow', { workflowId: string }>;
-type SelectStylePresetAction = _StudioInitAction<'selectStylePreset', { stylePresetId: string }>;
-type SendToCanvasAction = _StudioInitAction<'sendToCanvas', { imageName: string }>;
-type UseAllParametersAction = _StudioInitAction<'useAllParameters', { imageName: string }>;
-type StudioDestinationAction = _StudioInitAction<
- 'goToDestination',
- {
- destination:
- | 'generation'
- | 'canvas'
- | 'workflows'
- | 'upscaling'
- | 'video'
- | 'viewAllWorkflows'
- | 'viewAllWorkflowsRecommended'
- | 'viewAllStylePresets';
- }
->;
-// Use global state to show loader until we are ready to render the studio.
-export const $didStudioInit = atom(false);
-
-export type StudioInitAction =
- | LoadWorkflowAction
- | SelectStylePresetAction
- | SendToCanvasAction
- | UseAllParametersAction
- | StudioDestinationAction;
-
-/**
- * A hook that performs an action when the studio is initialized. This is useful for deep linking into the studio.
- *
- * The action is performed only once, when the hook is first run.
- *
- * In this hook, we prefer to use imperative APIs over hooks to avoid re-rendering the parent component. For example:
- * - Use `getImageDTO` helper instead of `useGetImageDTO`
- * - Usee the `$imageViewer` atom instead of `useImageViewer`
- */
-export const useStudioInitAction = (action?: StudioInitAction) => {
- useAssertSingleton('useStudioInitAction');
- const { t } = useTranslation();
- const didParseOpenAPISchema = useStore($hasTemplates);
- const store = useAppStore();
- const loadWorkflowWithDialog = useLoadWorkflowWithDialog();
- const workflowLibraryTagOptions = useStore($workflowLibraryTagOptions);
-
- const handleSendToCanvas = useCallback(
- async (imageName: string) => {
- // Try to the image DTO - use an imperative helper, rather than `useGetImageDTO`, so that we aren't re-rendering
- // the parent of this hook whenever the image name changes
- const getImageDTOResult = await withResultAsync(() => getImageDTO(imageName));
- if (getImageDTOResult.isErr()) {
- toast({
- title: t('toast.unableToLoadImage'),
- status: 'error',
- });
- return;
- }
- const imageDTO = getImageDTOResult.value;
- const imageObject = imageDTOToImageObject(imageDTO);
- const overrides: Partial = {
- objects: [imageObject],
- };
- await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
- store.dispatch(canvasReset());
- store.dispatch(rasterLayerAdded({ overrides, isSelected: true }));
- store.dispatch(sentImageToCanvas());
- toast({
- title: t('toast.sentToCanvas'),
- status: 'info',
- });
- },
- [store, t]
- );
-
- const handleUseAllMetadata = useCallback(
- async (imageName: string) => {
- // Try to the image metadata - use an imperative helper, rather than `useGetImageMetadata`, so that we aren't
- // re-rendering the parent of this hook whenever the image name changes
- const getImageMetadataResult = await withResultAsync(() => getImageMetadata(imageName));
- if (getImageMetadataResult.isErr()) {
- toast({
- title: t('toast.unableToLoadImageMetadata'),
- status: 'error',
- });
- return;
- }
- const metadata = getImageMetadataResult.value;
- store.dispatch(canvasReset());
- // This shows a toast
- await MetadataUtils.recallAllImageMetadata(metadata, store);
- },
- [store, t]
- );
-
- const handleLoadWorkflow = useCallback(
- (workflowId: string) => {
- // This shows a toast
- loadWorkflowWithDialog({
- type: 'library',
- data: workflowId,
- onSuccess: () => {
- navigationApi.switchToTab('workflows');
- },
- });
- },
- [loadWorkflowWithDialog]
- );
-
- const handleSelectStylePreset = useCallback(
- async (stylePresetId: string) => {
- const getStylePresetResult = await withResultAsync(() => getStylePreset(stylePresetId));
- if (getStylePresetResult.isErr()) {
- toast({
- title: t('toast.unableToLoadStylePreset'),
- status: 'error',
- });
- return;
- }
- store.dispatch(activeStylePresetIdChanged(stylePresetId));
- navigationApi.switchToTab('canvas');
- toast({
- title: t('toast.stylePresetLoaded'),
- status: 'info',
- });
- },
- [store, t]
- );
-
- const handleGoToDestination = useCallback(
- async (destination: StudioDestinationAction['data']['destination']) => {
- switch (destination) {
- case 'generation':
- // Go to the generate tab, open the launchpad
- await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
- break;
- case 'canvas':
- // Go to the canvas tab, open the launchpad
- await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
- break;
- case 'workflows':
- // Go to the workflows tab
- navigationApi.switchToTab('workflows');
- break;
- case 'upscaling':
- // Go to the upscaling tab
- navigationApi.switchToTab('upscaling');
- break;
- case 'video':
- // Go to the video tab
- await navigationApi.focusPanel('video', LAUNCHPAD_PANEL_ID);
- break;
- case 'viewAllWorkflows':
- // Go to the workflows tab and open the workflow library modal
- navigationApi.switchToTab('workflows');
- $isWorkflowLibraryModalOpen.set(true);
- break;
- case 'viewAllWorkflowsRecommended':
- // Go to the workflows tab and open the workflow library modal with the recommended workflows view
- navigationApi.switchToTab('workflows');
- $isWorkflowLibraryModalOpen.set(true);
- store.dispatch(workflowLibraryViewChanged('defaults'));
- store.dispatch(workflowLibraryTagsReset());
- for (const tag of workflowLibraryTagOptions) {
- if (tag.recommended) {
- store.dispatch(workflowLibraryTagToggled(tag.label));
- }
- }
- break;
- case 'viewAllStylePresets':
- // Go to the canvas tab and open the style presets menu
- navigationApi.switchToTab('canvas');
- $isStylePresetsMenuOpen.set(true);
- break;
- }
- },
- [store, workflowLibraryTagOptions]
- );
-
- const handleStudioInitAction = useCallback(
- async (action: StudioInitAction) => {
- // This cannot be in the useEffect below because we need to await some of the actions before setting didStudioInit.
- switch (action.type) {
- case 'loadWorkflow':
- await handleLoadWorkflow(action.data.workflowId);
- break;
- case 'selectStylePreset':
- await handleSelectStylePreset(action.data.stylePresetId);
- break;
-
- case 'sendToCanvas':
- await handleSendToCanvas(action.data.imageName);
- break;
-
- case 'useAllParameters':
- await handleUseAllMetadata(action.data.imageName);
- break;
-
- case 'goToDestination':
- handleGoToDestination(action.data.destination);
- break;
-
- default:
- break;
- }
- $didStudioInit.set(true);
- },
- [handleGoToDestination, handleLoadWorkflow, handleSelectStylePreset, handleSendToCanvas, handleUseAllMetadata]
- );
-
- useEffect(() => {
- if ($didStudioInit.get() || !didParseOpenAPISchema) {
- return;
- }
-
- if (!action) {
- $didStudioInit.set(true);
- return;
- }
-
- handleStudioInitAction(action);
- }, [
- handleSendToCanvas,
- handleUseAllMetadata,
- action,
- handleSelectStylePreset,
- handleGoToDestination,
- handleLoadWorkflow,
- didParseOpenAPISchema,
- handleStudioInitAction,
- ]);
-};
diff --git a/invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts b/invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts
new file mode 100644
index 00000000000..7bd55f25f4f
--- /dev/null
+++ b/invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts
@@ -0,0 +1,33 @@
+import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
+import { useEffect } from 'react';
+import { useGetQueueStatusQuery } from 'services/api/endpoints/queue';
+
+const baseTitle = document.title;
+const invokeLogoSVG = 'assets/images/invoke-favicon.svg';
+const invokeAlertLogoSVG = 'assets/images/invoke-alert-favicon.svg';
+
+const queryOptions = {
+ selectFromResult: (res) => ({
+ queueSize: res.data ? res.data.queue.pending + res.data.queue.in_progress : 0,
+ }),
+} satisfies Parameters[1];
+
+const updateFavicon = (queueSize: number) => {
+ document.title = queueSize > 0 ? `(${queueSize}) ${baseTitle}` : baseTitle;
+ const faviconEl = document.getElementById('invoke-favicon');
+ if (faviconEl instanceof HTMLLinkElement) {
+ faviconEl.href = queueSize > 0 ? invokeAlertLogoSVG : invokeLogoSVG;
+ }
+};
+
+/**
+ * This hook synchronizes the queue status with the page's title and favicon.
+ * It should be considered a singleton and only used once in the component tree.
+ */
+export const useSyncFaviconQueueStatus = () => {
+ useAssertSingleton('useSyncFaviconQueueStatus');
+ const { queueSize } = useGetQueueStatusQuery(undefined, queryOptions);
+ useEffect(() => {
+ updateFavicon(queueSize);
+ }, [queueSize]);
+};
diff --git a/invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts b/invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts
deleted file mode 100644
index d6874c3bb5e..00000000000
--- a/invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-import { useEffect } from 'react';
-import { useGetQueueStatusQuery } from 'services/api/endpoints/queue';
-
-const baseTitle = document.title;
-const invokeLogoSVG = 'assets/images/invoke-favicon.svg';
-const invokeAlertLogoSVG = 'assets/images/invoke-alert-favicon.svg';
-
-/**
- * This hook synchronizes the queue status with the page's title and favicon.
- * It should be considered a singleton and only used once in the component tree.
- */
-export const useSyncQueueStatus = () => {
- const { queueSize } = useGetQueueStatusQuery(undefined, {
- selectFromResult: (res) => ({
- queueSize: res.data ? res.data.queue.pending + res.data.queue.in_progress : 0,
- }),
- });
- useEffect(() => {
- document.title = queueSize > 0 ? `(${queueSize}) ${baseTitle}` : baseTitle;
- const faviconEl = document.getElementById('invoke-favicon');
- if (faviconEl instanceof HTMLLinkElement) {
- faviconEl.href = queueSize > 0 ? invokeAlertLogoSVG : invokeLogoSVG;
- }
- }, [queueSize]);
-};
diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts
index 1f753f97bb7..6c843068df3 100644
--- a/invokeai/frontend/web/src/app/logging/logger.ts
+++ b/invokeai/frontend/web/src/app/logging/logger.ts
@@ -26,7 +26,6 @@ export const zLogNamespace = z.enum([
'system',
'queue',
'workflows',
- 'video',
]);
export type LogNamespace = z.infer;
@@ -36,20 +35,6 @@ export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fat
export type LogLevel = z.infer;
export const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success;
-/**
- * Override logging settings.
- * @property logIsEnabled Override the enabled log state. Omit to use the user's settings.
- * @property logNamespaces Override the enabled log namespaces. Use `"*"` for all namespaces. Omit to use the user's settings.
- * @property logLevel Override the log level. Omit to use the user's settings.
- */
-export type LoggingOverrides = {
- logIsEnabled?: boolean;
- logNamespaces?: LogNamespace[] | '*';
- logLevel?: LogLevel;
-};
-
-export const $loggingOverrides = atom();
-
// Translate human-readable log levels to numbers, used for log filtering
const LOG_LEVEL_MAP: Record = {
trace: 10,
diff --git a/invokeai/frontend/web/src/app/logging/useLogger.ts b/invokeai/frontend/web/src/app/logging/useLogger.ts
deleted file mode 100644
index ac2a05cadbc..00000000000
--- a/invokeai/frontend/web/src/app/logging/useLogger.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-import { useMemo } from 'react';
-
-import type { LogNamespace } from './logger';
-import { logger } from './logger';
-
-export const useLogger = (namespace: LogNamespace) => {
- const log = useMemo(() => logger(namespace), [namespace]);
-
- return log;
-};
diff --git a/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts b/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts
index fb4b2a7b8ee..ca8f26bb3fa 100644
--- a/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts
+++ b/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts
@@ -1,5 +1,4 @@
-import { useStore } from '@nanostores/react';
-import { $loggingOverrides, configureLogging } from 'app/logging/logger';
+import { configureLogging } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import {
@@ -20,24 +19,11 @@ import { useLayoutEffect } from 'react';
export const useSyncLoggingConfig = () => {
useAssertSingleton('useSyncLoggingConfig');
- const loggingOverrides = useStore($loggingOverrides);
-
const logLevel = useAppSelector(selectSystemLogLevel);
const logNamespaces = useAppSelector(selectSystemLogNamespaces);
const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
useLayoutEffect(() => {
- configureLogging(
- loggingOverrides?.logIsEnabled ?? logIsEnabled,
- loggingOverrides?.logLevel ?? logLevel,
- loggingOverrides?.logNamespaces ?? logNamespaces
- );
- }, [
- logIsEnabled,
- logLevel,
- logNamespaces,
- loggingOverrides?.logIsEnabled,
- loggingOverrides?.logLevel,
- loggingOverrides?.logNamespaces,
- ]);
+ configureLogging(logIsEnabled, logLevel, logNamespaces);
+ }, [logIsEnabled, logLevel, logNamespaces]);
};
diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts
index ef42a5fa2db..9e67770b436 100644
--- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts
+++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts
@@ -1,8 +1,5 @@
import { logger } from 'app/logging/logger';
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { $projectId } from 'app/store/nanostores/projectId';
-import { $queueId } from 'app/store/nanostores/queueId';
import type { UseStore } from 'idb-keyval';
import { createStore as idbCreateStore, del as idbDel, get as idbGet } from 'idb-keyval';
import type { Driver } from 'redux-remember';
@@ -19,24 +16,11 @@ const getUrl = (endpoint: 'get_by_key' | 'set_by_key' | 'delete', key?: string)
query['key'] = key;
}
- const path = buildV1Url(`client_state/${$queueId.get()}/${endpoint}`, query);
+ const path = buildV1Url(`client_state/default/${endpoint}`, query);
const url = `${baseUrl}/${path}`;
return url;
};
-const getHeaders = () => {
- const headers = new Headers();
- const authToken = $authToken.get();
- const projectId = $projectId.get();
- if (authToken) {
- headers.set('Authorization', `Bearer ${authToken}`);
- }
- if (projectId) {
- headers.set('project-id', projectId);
- }
- return headers;
-};
-
// Persistence happens per slice. To track when persistence is in progress, maintain a ref count, incrementing
// it when a slice is being persisted and decrementing it when the persistence is done.
let persistRefCount = 0;
@@ -87,8 +71,7 @@ const getIdbKey = (key: string) => {
const getItem = async (key: string) => {
try {
const url = getUrl('get_by_key', key);
- const headers = getHeaders();
- const res = await fetch(url, { method: 'GET', headers });
+ const res = await fetch(url, { method: 'GET' });
if (!res.ok) {
throw new Error(`Response status: ${res.status}`);
}
@@ -130,7 +113,6 @@ const getItem = async (key: string) => {
} catch (originalError) {
throw new StorageError({
key,
- projectId: $projectId.get(),
originalError,
});
}
@@ -148,8 +130,7 @@ const setItem = async (key: string, value: string) => {
}
log.trace({ key, last: lastPersistedState.get(key), next: value }, `Persisting state for ${key}`);
const url = getUrl('set_by_key', key);
- const headers = getHeaders();
- const res = await fetch(url, { method: 'POST', headers, body: value });
+ const res = await fetch(url, { method: 'POST', body: value });
if (!res.ok) {
throw new Error(`Response status: ${res.status}`);
}
@@ -160,7 +141,6 @@ const setItem = async (key: string, value: string) => {
throw new StorageError({
key,
value,
- projectId: $projectId.get(),
originalError,
});
} finally {
@@ -178,8 +158,7 @@ export const clearStorage = async () => {
try {
persistRefCount++;
const url = getUrl('delete');
- const headers = getHeaders();
- const res = await fetch(url, { method: 'POST', headers });
+ const res = await fetch(url, { method: 'POST' });
if (!res.ok) {
throw new Error(`Response status: ${res.status}`);
}
diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts
index 9266ee478ff..87c89b27f51 100644
--- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts
+++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts
@@ -7,7 +7,6 @@ type StorageErrorArgs = {
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */ // any is correct
value?: any;
originalError?: unknown;
- projectId?: string;
};
export class StorageError extends Error {
@@ -15,18 +14,14 @@ export class StorageError extends Error {
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */ // any is correct
value?: any;
originalError?: Error;
- projectId?: string;
- constructor({ key, value, originalError, projectId }: StorageErrorArgs) {
+ constructor({ key, value, originalError }: StorageErrorArgs) {
super(`Error setting ${key}`);
this.name = 'StorageSetError';
this.key = key;
if (value !== undefined) {
this.value = value;
}
- if (projectId !== undefined) {
- this.projectId = projectId;
- }
if (originalError instanceof Error) {
this.originalError = originalError;
}
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts
deleted file mode 100644
index 4c8f139779a..00000000000
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-import type { AppStartListening } from 'app/store/store';
-import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
-import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
-import { appInfoApi } from 'services/api/endpoints/appInfo';
-
-export const addAppConfigReceivedListener = (startAppListening: AppStartListening) => {
- startAppListening({
- matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
- effect: (action, { getState, dispatch }) => {
- const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload;
- const infillMethod = getState().params.infillMethod;
-
- if (!infill_methods.includes(infillMethod)) {
- // If the selected infill method does not exist, prefer 'lama' if it's in the list, otherwise 'tile'.
- // TODO(psyche): lama _should_ always be in the list, but the API doesn't guarantee it...
- const infillMethod = infill_methods.includes('lama') ? 'lama' : 'tile';
- dispatch(setInfillMethod(infillMethod));
- }
-
- if (!nsfw_methods.includes('nsfw_checker')) {
- dispatch(shouldUseNSFWCheckerChanged(false));
- }
-
- if (!watermarking_methods.includes('invisible_watermark')) {
- dispatch(shouldUseWatermarkerChanged(false));
- }
- },
- });
-};
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts
index 5ed60447aae..6bff69c64a3 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts
@@ -1,7 +1,10 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/store';
+import { noop } from 'es-toolkit';
+import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
-import { itemSelected } from 'features/gallery/store/gallerySlice';
+import { imageSelected } from 'features/gallery/store/gallerySlice';
+import { appInfoApi } from 'services/api/endpoints/appInfo';
import { imagesApi } from 'services/api/endpoints/images';
export const appStarted = createAction('app/appStarted');
@@ -9,23 +12,37 @@ export const appStarted = createAction('app/appStarted');
export const addAppStartedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: appStarted,
- effect: async (action, { unsubscribe, cancelActiveListeners, take, getState, dispatch }) => {
+ effect: (action, { unsubscribe, cancelActiveListeners, take, getState, dispatch }) => {
// this should only run once
cancelActiveListeners();
unsubscribe();
// ensure an image is selected when we load the first board
- const firstImageLoad = await take(imagesApi.endpoints.getImageNames.matchFulfilled);
- if (firstImageLoad !== null) {
+ take(imagesApi.endpoints.getImageNames.matchFulfilled).then((firstImageLoad) => {
+ if (firstImageLoad === null) {
+ // timeout or cancelled
+ return;
+ }
const [{ payload }] = firstImageLoad;
const selectedImage = selectLastSelectedItem(getState());
if (selectedImage) {
return;
}
if (payload.image_names[0]) {
- dispatch(itemSelected({ type: 'image', id: payload.image_names[0] }));
+ dispatch(imageSelected(payload.image_names[0]));
}
- }
+ });
+
+ dispatch(appInfoApi.endpoints.getPatchmatchStatus.initiate())
+ .unwrap()
+ .then((isPatchmatchAvailable) => {
+ const infillMethod = getState().params.infillMethod;
+
+ if (!isPatchmatchAvailable && infillMethod === 'patchmatch') {
+ dispatch(setInfillMethod('lama'));
+ }
+ })
+ .catch(noop);
},
});
};
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts
index 3a42213e535..9fd777fb29b 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts
@@ -1,14 +1,8 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/store';
-import {
- selectGalleryView,
- selectGetImageNamesQueryArgs,
- selectGetVideoIdsQueryArgs,
- selectSelectedBoardId,
-} from 'features/gallery/store/gallerySelectors';
-import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice';
+import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
+import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
-import { videosApi } from 'services/api/endpoints/videos';
export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => {
startAppListening({
@@ -25,57 +19,26 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening)
const state = getState();
const board_id = selectSelectedBoardId(state);
- const view = selectGalleryView(state);
- if (view === 'images' || view === 'assets') {
- const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id };
- // wait until the board has some images - maybe it already has some from a previous fetch
- // must use getState() to ensure we do not have stale state
- const isSuccess = await condition(
- () => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
- 5000
- );
+ const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id };
+ // wait until the board has some images - maybe it already has some from a previous fetch
+ // must use getState() to ensure we do not have stale state
+ const isSuccess = await condition(
+ () => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
+ 5000
+ );
- if (!isSuccess) {
- dispatch(itemSelected(null));
- return;
- }
-
- // the board was just changed - we can select the first image
- const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;
-
- const imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null;
-
- if (imageToSelect) {
- dispatch(itemSelected({ type: 'image', id: imageToSelect }));
- } else {
- dispatch(itemSelected(null));
- }
- } else {
- const queryArgs = { ...selectGetVideoIdsQueryArgs(state), board_id };
- // wait until the board has some images - maybe it already has some from a previous fetch
- // must use getState() to ensure we do not have stale state
- const isSuccess = await condition(
- () => videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).isSuccess,
- 5000
- );
-
- if (!isSuccess) {
- dispatch(itemSelected(null));
- return;
- }
+ if (!isSuccess) {
+ dispatch(imageSelected(null));
+ return;
+ }
- // the board was just changed - we can select the first image
- const videoIds = videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).data?.video_ids;
+ // the board was just changed - we can select the first image
+ const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;
- const videoToSelect = videoIds && videoIds.length > 0 ? videoIds[0] : null;
+ const imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null;
- if (videoToSelect) {
- dispatch(itemSelected({ type: 'video', id: videoToSelect }));
- } else {
- dispatch(itemSelected(null));
- }
- }
+ dispatch(imageSelected(imageToSelect ?? null));
},
});
};
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts
index f266f147ee8..416c77b9dd7 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts
@@ -13,13 +13,12 @@ const log = logger('system');
export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled,
- effect: (action, { getState }) => {
+ effect: (action) => {
const schemaJSON = action.payload;
log.debug({ schemaJSON: parseify(schemaJSON) } as JsonObject, 'Received OpenAPI schema');
- const { nodesAllowlist, nodesDenylist } = getState().config;
- const nodeTemplates = parseSchema(schemaJSON, nodesAllowlist, nodesDenylist);
+ const nodeTemplates = parseSchema(schemaJSON);
log.debug({ nodeTemplates } as JsonObject, `Built ${size(nodeTemplates)} node templates`);
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts
index 562dd7c27f3..f421009030f 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts
@@ -1,8 +1,6 @@
-import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening, RootState } from 'app/store/store';
import { omit } from 'es-toolkit/compat';
-import { imageUploadedClientSide } from 'features/gallery/store/actions';
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice';
import { toast } from 'features/toast/toast';
@@ -10,7 +8,6 @@ import { t } from 'i18next';
import { boardsApi } from 'services/api/endpoints/boards';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
-import { getCategories, getListImagesUrl } from 'services/api/util';
const log = logger('gallery');
/**
@@ -36,52 +33,20 @@ let lastUploadedToastTimeout: number | null = null;
export const addImageUploadedFulfilledListener = (startAppListening: AppStartListening) => {
startAppListening({
- matcher: isAnyOf(imagesApi.endpoints.uploadImage.matchFulfilled, imageUploadedClientSide),
+ matcher: imagesApi.endpoints.uploadImage.matchFulfilled,
effect: (action, { dispatch, getState }) => {
let imageDTO: ImageDTO;
let silent;
let isFirstUploadOfBatch = true;
-
- if (imageUploadedClientSide.match(action)) {
- imageDTO = action.payload.imageDTO;
- silent = action.payload.silent;
- isFirstUploadOfBatch = action.payload.isFirstUploadOfBatch;
- } else if (imagesApi.endpoints.uploadImage.matchFulfilled(action)) {
- imageDTO = action.payload;
- silent = action.meta.arg.originalArgs.silent;
- isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
- } else {
- return;
- }
+ imageDTO = action.payload;
+ silent = action.meta.arg.originalArgs.silent;
+ isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
if (silent || imageDTO.is_intermediate) {
// If the image is silent or intermediate, we don't want to show a toast
return;
}
- if (imageUploadedClientSide.match(action)) {
- const categories = getCategories(imageDTO);
- const boardId = imageDTO.board_id ?? 'none';
- dispatch(
- imagesApi.util.invalidateTags([
- {
- type: 'ImageList',
- id: getListImagesUrl({
- board_id: boardId,
- categories,
- }),
- },
- {
- type: 'Board',
- id: boardId,
- },
- {
- type: 'BoardImagesTotal',
- id: boardId,
- },
- ])
- );
- }
const state = getState();
log.debug({ imageDTO }, 'Image uploaded');
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
index e53fc977b98..d5c58528cb9 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
@@ -17,14 +17,8 @@ import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { selectGlobalRefImageModels, selectRegionalRefImageModels } from 'services/api/hooks/modelsByType';
-import type { AnyModelConfig } from 'services/api/types';
-import {
- isChatGPT4oModelConfig,
- isFluxKontextApiModelConfig,
- isFluxKontextModelConfig,
- isFluxReduxModelConfig,
- isGemini2_5ModelConfig,
-} from 'services/api/types';
+import type { FLUXKontextModelConfig, FLUXReduxModelConfig, IPAdapterModelConfig } from 'services/api/types';
+import { isFluxKontextModelConfig, isFluxReduxModelConfig } from 'services/api/types';
const log = logger('models');
@@ -68,26 +62,19 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);
- let newGlobalRefImageModel = null;
+ let newGlobalRefImageModel: IPAdapterModelConfig | FLUXKontextModelConfig | FLUXReduxModelConfig | null =
+ null;
// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
- const exactMatchOrFirst = (candidates: T[]): T | null =>
- candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
+ const exactMatchOrFirst = (
+ candidates: T[]
+ ): T | null => candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
- } else if (newModel.base === 'chatgpt-4o') {
- const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
- newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
- } else if (newModel.base === 'gemini-2.5') {
- const gemini2_5Models = allRefImageModels.filter(isGemini2_5ModelConfig);
- newGlobalRefImageModel = exactMatchOrFirst(gemini2_5Models);
- } else if (newModel.base === 'flux-kontext') {
- const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
- newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts
index 63602339a9b..8cbbc72343b 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts
@@ -19,14 +19,12 @@ import {
isRegionalGuidanceFLUXReduxConfig,
isRegionalGuidanceIPAdapterConfig,
} from 'features/controlLayers/store/types';
-import { zModelIdentifierField } from 'features/nodes/types/common';
import { modelSelected } from 'features/parameters/store/actions';
import {
postProcessingModelChanged,
tileControlnetModelChanged,
upscaleModelChanged,
} from 'features/parameters/store/upscaleSlice';
-import { videoModelChanged } from 'features/parameters/store/videoSlice';
import {
zParameterCLIPEmbedModel,
zParameterSpandrelImageToImageModel,
@@ -49,7 +47,6 @@ import {
isRefinerMainModelModelConfig,
isSpandrelImageToImageModelConfig,
isT5EncoderModelConfigOrSubmodel,
- isVideoModelConfig,
} from 'services/api/types';
import type { JsonObject } from 'type-fest';
@@ -90,7 +87,6 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
handleCLIPEmbedModels(models, state, dispatch, log);
handleFLUXVAEModels(models, state, dispatch, log);
handleFLUXReduxModels(models, state, dispatch, log);
- handleVideoModels(models, state, dispatch, log);
},
});
};
@@ -123,19 +119,6 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
return;
}
- // If we have a default model, try to use it
- if (state.config.sd.defaultModel) {
- const defaultModel = allMainModels.find((m) => m.key === state.config.sd.defaultModel);
- if (defaultModel) {
- log.debug(
- { selectedMainModel, defaultModel },
- 'No selected main model or selected main model is not available, selecting default model'
- );
- dispatch(modelSelected(defaultModel));
- return;
- }
- }
-
log.debug(
{ selectedMainModel, firstModel },
'No selected main model or selected main model is not available, selecting first available model'
@@ -203,22 +186,6 @@ const handleLoRAModels: ModelHandler = (models, state, dispatch, log) => {
});
};
-const handleVideoModels: ModelHandler = (models, state, dispatch, log) => {
- const videoModels = models.filter(isVideoModelConfig);
- const selectedVideoModel = state.video.videoModel;
-
- if (selectedVideoModel && videoModels.some((m) => m.key === selectedVideoModel.key)) {
- return;
- }
-
- const firstModel = videoModels[0] || null;
- if (firstModel) {
- log.debug({ firstModel }, 'No video model selected, selecting first available video model');
- dispatch(videoModelChanged({ videoModel: zModelIdentifierField.parse(firstModel) }));
- return;
- }
-};
-
const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log) => {
const caModels = models.filter(isControlLayerModelConfig);
selectCanvasSlice(state).controlLayers.entities.forEach((entity) => {
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts
index 64370eee8bc..f8f7d1659a1 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts
@@ -1,7 +1,6 @@
import { objectEquals } from '@observ33r/object-equals';
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
-import { $baseUrl } from 'app/store/nanostores/baseUrl';
import type { AppStartListening } from 'app/store/store';
import { atom } from 'nanostores';
import { api } from 'services/api';
@@ -16,7 +15,7 @@ export const socketConnected = createAction('socket/connected');
export const addSocketConnectedEventListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: socketConnected,
- effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => {
+ effect: async (action, { dispatch, getState }) => {
/**
* The rest of this listener has recovery logic for when the socket disconnects and reconnects.
*
@@ -43,20 +42,12 @@ export const addSocketConnectedEventListener = (startAppListening: AppStartListe
// Else, we need to compare the last-known queue status with the current queue status, re-fetching
// everything if it has changed.
-
- if ($baseUrl.get()) {
- // If we have a baseUrl (e.g. not localhost), we need to debounce the re-fetch to not hammer server
- cancelActiveListeners();
- // Add artificial jitter to the debounce
- await delay(1000 + Math.random() * 1000);
- }
-
const prevQueueStatusData = selectQueueStatus(getState()).data;
try {
// Fetch the queue status again
const queueStatusRequest = dispatch(
- await queueApi.endpoints.getQueueStatus.initiate(undefined, {
+ queueApi.endpoints.getQueueStatus.initiate(undefined, {
forceRefetch: true,
subscribe: false,
})
diff --git a/invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts b/invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts
deleted file mode 100644
index cf41facb7c3..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { atom } from 'nanostores';
-
-export const $accountSettingsLink = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts b/invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts
deleted file mode 100644
index 4008b86cef2..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { atom } from 'nanostores';
-
-export const $accountTypeText = atom('');
diff --git a/invokeai/frontend/web/src/app/store/nanostores/authToken.ts b/invokeai/frontend/web/src/app/store/nanostores/authToken.ts
deleted file mode 100644
index 1b1e2137309..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/authToken.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-import { atom, computed } from 'nanostores';
-
-/**
- * The user's auth token.
- */
-export const $authToken = atom();
-
-/**
- * The crossOrigin value to use for all image loading. Depends on whether the user is authenticated.
- */
-export const $crossOrigin = computed($authToken, (token) => (token ? 'use-credentials' : 'anonymous'));
diff --git a/invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts b/invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts
deleted file mode 100644
index 19bebab0ef8..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import { atom } from 'nanostores';
-
-/**
- * The OpenAPI base url.
- */
-export const $baseUrl = atom();
diff --git a/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts b/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts
deleted file mode 100644
index 4f7118e2ebc..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import { atom } from 'nanostores';
-
-const DEFAULT_BULK_DOWNLOAD_ID = 'default';
-
-/**
- * The download id for a bulk download. Used for socket subscriptions.
- */
-
-export const $bulkDownloadId = atom(DEFAULT_BULK_DOWNLOAD_ID);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts b/invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts
deleted file mode 100644
index 1a6a5571a03..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { atom } from 'nanostores';
-import type { ReactNode } from 'react';
-
-export const $customNavComponent = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts b/invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts
deleted file mode 100644
index 9f6628ac9cd..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-import type { MenuItemProps } from '@invoke-ai/ui-library';
-import { atom } from 'nanostores';
-
-export type CustomStarUi = {
- on: {
- icon: MenuItemProps['icon'];
- text: string;
- };
- off: {
- icon: MenuItemProps['icon'];
- text: string;
- };
-};
-export const $customStarUI = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts b/invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts
deleted file mode 100644
index b71cab53088..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { atom } from 'nanostores';
-
-export const $isDebugging = atom(false);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/logo.ts b/invokeai/frontend/web/src/app/store/nanostores/logo.ts
deleted file mode 100644
index 5fd94ebd901..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/logo.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { atom } from 'nanostores';
-import type { ReactNode } from 'react';
-
-export const $logo = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts b/invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts
deleted file mode 100644
index fdc0d8a788b..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { atom } from 'nanostores';
-
-export const $onClickGoToModelManager = atom<(() => void) | undefined>(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts b/invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts
deleted file mode 100644
index 124815f7ead..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { atom } from 'nanostores';
-
-export const $openAPISchemaUrl = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/projectId.ts b/invokeai/frontend/web/src/app/store/nanostores/projectId.ts
deleted file mode 100644
index c2b14e91acb..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/projectId.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import { atom } from 'nanostores';
-
-/**
- * The optional project-id header.
- */
-export const $projectId = atom();
-
-export const $projectName = atom();
-export const $projectUrl = atom();
diff --git a/invokeai/frontend/web/src/app/store/nanostores/queueId.ts b/invokeai/frontend/web/src/app/store/nanostores/queueId.ts
deleted file mode 100644
index 462cf69d0a6..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/queueId.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import { atom } from 'nanostores';
-
-export const DEFAULT_QUEUE_ID = 'default';
-
-export const $queueId = atom(DEFAULT_QUEUE_ID);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/toastMap.ts b/invokeai/frontend/web/src/app/store/nanostores/toastMap.ts
deleted file mode 100644
index 10f7795a8a0..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/toastMap.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import type { ToastConfig } from 'features/toast/toast';
-import { atom } from 'nanostores';
-
-export const $toastMap = atom | undefined>(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts b/invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts
deleted file mode 100644
index f36512d7441..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { atom } from 'nanostores';
-import type { ReactNode } from 'react';
-
-export const $videoUpsellComponent = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts b/invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts
deleted file mode 100644
index 5e8361412e2..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { atom } from 'nanostores';
-import type { ReactNode } from 'react';
-
-export const $whatsNew = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts
index 12fcfa5a406..cad6f489df7 100644
--- a/invokeai/frontend/web/src/app/store/store.ts
+++ b/invokeai/frontend/web/src/app/store/store.ts
@@ -4,7 +4,6 @@ import { logger } from 'app/logging/logger';
import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
-import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
@@ -34,20 +33,16 @@ import { nodesSliceConfig } from 'features/nodes/store/nodesSlice';
import { workflowLibrarySliceConfig } from 'features/nodes/store/workflowLibrarySlice';
import { workflowSettingsSliceConfig } from 'features/nodes/store/workflowSettingsSlice';
import { upscaleSliceConfig } from 'features/parameters/store/upscaleSlice';
-import { videoSliceConfig } from 'features/parameters/store/videoSlice';
import { queueSliceConfig } from 'features/queue/store/queueSlice';
import { stylePresetSliceConfig } from 'features/stylePresets/store/stylePresetSlice';
-import { configSliceConfig } from 'features/system/store/configSlice';
import { systemSliceConfig } from 'features/system/store/systemSlice';
import { uiSliceConfig } from 'features/ui/store/uiSlice';
import { diff } from 'jsondiffpatch';
-import dynamicMiddlewares from 'redux-dynamic-middlewares';
import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
import { REMEMBER_REHYDRATED, rememberEnhancer, rememberReducer } from 'redux-remember';
import undoable, { newHistory } from 'redux-undo';
import { serializeError } from 'serialize-error';
import { api } from 'services/api';
-import { authToastMiddleware } from 'services/api/authToastMiddleware';
import type { JsonObject } from 'type-fest';
import { reduxRememberDriver } from './enhancers/reduxRemember/driver';
@@ -57,7 +52,7 @@ import { stateSanitizer } from './middleware/devtools/stateSanitizer';
import { addArchivedOrDeletedBoardListener } from './middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener';
import { addImageUploadedFulfilledListener } from './middleware/listenerMiddleware/listeners/imageUploaded';
-export const listenerMiddleware = createListenerMiddleware();
+const listenerMiddleware = createListenerMiddleware();
const log = logger('system');
@@ -67,7 +62,6 @@ const SLICE_CONFIGS = {
[canvasSettingsSliceConfig.slice.reducerPath]: canvasSettingsSliceConfig,
[canvasSliceConfig.slice.reducerPath]: canvasSliceConfig,
[changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig,
- [configSliceConfig.slice.reducerPath]: configSliceConfig,
[dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig,
[gallerySliceConfig.slice.reducerPath]: gallerySliceConfig,
[lorasSliceConfig.slice.reducerPath]: lorasSliceConfig,
@@ -80,7 +74,6 @@ const SLICE_CONFIGS = {
[systemSliceConfig.slice.reducerPath]: systemSliceConfig,
[uiSliceConfig.slice.reducerPath]: uiSliceConfig,
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig,
- [videoSliceConfig.slice.reducerPath]: videoSliceConfig,
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig,
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig,
};
@@ -97,7 +90,6 @@ const ALL_REDUCERS = {
canvasSliceConfig.undoableConfig?.reduxUndoOptions
),
[changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig.slice.reducer,
- [configSliceConfig.slice.reducerPath]: configSliceConfig.slice.reducer,
[dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig.slice.reducer,
[gallerySliceConfig.slice.reducerPath]: gallerySliceConfig.slice.reducer,
[lorasSliceConfig.slice.reducerPath]: lorasSliceConfig.slice.reducer,
@@ -114,7 +106,6 @@ const ALL_REDUCERS = {
[systemSliceConfig.slice.reducerPath]: systemSliceConfig.slice.reducer,
[uiSliceConfig.slice.reducerPath]: uiSliceConfig.slice.reducer,
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig.slice.reducer,
- [videoSliceConfig.slice.reducerPath]: videoSliceConfig.slice.reducer,
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig.slice.reducer,
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig.slice.reducer,
};
@@ -197,8 +188,6 @@ export const createStore = (options?: { persist?: boolean; persistDebounce?: num
immutableCheck: import.meta.env.MODE === 'development',
})
.concat(api.middleware)
- .concat(dynamicMiddlewares)
- .concat(authToastMiddleware)
// .concat(getDebugLoggerMiddleware({ withDiff: true, withNextState: true }))
.prepend(listenerMiddleware.middleware),
enhancers: (getDefaultEnhancers) => {
@@ -252,6 +241,7 @@ export type AppStartListening = TypedStartListening;
export const addAppListener = addListener.withTypes();
+// To avoid circular dependencies, all listener middleware listeners are added here in the main store setup file.
const startAppListening = listenerMiddleware.startListening as AppStartListening;
addImageUploadedFulfilledListener(startAppListening);
@@ -283,7 +273,6 @@ addModelSelectedListener(startAppListening);
// app startup
addAppStartedListener(startAppListening);
addModelsLoadedListener(startAppListening);
-addAppConfigReceivedListener(startAppListening);
// Ad-hoc upscale workflwo
addAdHocPostProcessingRequestedListener(startAppListening);
diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts
deleted file mode 100644
index b24f83a1b15..00000000000
--- a/invokeai/frontend/web/src/app/types/invokeai.ts
+++ /dev/null
@@ -1,302 +0,0 @@
-import { zFilterType } from 'features/controlLayers/store/filters';
-import { zParameterPrecision, zParameterScheduler } from 'features/parameters/types/parameterSchemas';
-import { zTabName } from 'features/ui/store/uiTypes';
-import type { PartialDeep } from 'type-fest';
-import z from 'zod';
-
-const zAppFeature = z.enum([
- 'faceRestore',
- 'upscaling',
- 'lightbox',
- 'modelManager',
- 'githubLink',
- 'discordLink',
- 'bugLink',
- 'aboutModal',
- 'localization',
- 'consoleLogging',
- 'dynamicPrompting',
- 'batches',
- 'syncModels',
- 'multiselect',
- 'pauseQueue',
- 'resumeQueue',
- 'invocationCache',
- 'modelCache',
- 'bulkDownload',
- 'starterModels',
- 'hfToken',
- 'retryQueueItem',
- 'cancelAndClearAll',
- 'chatGPT4oHigh',
- 'modelRelationships',
-]);
-export type AppFeature = z.infer;
-
-const zSDFeature = z.enum([
- 'controlNet',
- 'noise',
- 'perlinNoise',
- 'noiseThreshold',
- 'variation',
- 'symmetry',
- 'seamless',
- 'hires',
- 'lora',
- 'embedding',
- 'vae',
- 'hrf',
-]);
-export type SDFeature = z.infer;
-
-const zNumericalParameterConfig = z.object({
- initial: z.number().default(512),
- sliderMin: z.number().default(64),
- sliderMax: z.number().default(1536),
- numberInputMin: z.number().default(64),
- numberInputMax: z.number().default(4096),
- fineStep: z.number().default(8),
- coarseStep: z.number().default(64),
-});
-export type NumericalParameterConfig = z.infer;
-
-/**
- * Configuration options for the InvokeAI UI.
- * Distinct from system settings which may be changed inside the app.
- */
-export const zAppConfig = z.object({
- /**
- * Whether or not we should update image urls when image loading errors
- */
- shouldUpdateImagesOnConnect: z.boolean(),
- shouldFetchMetadataFromApi: z.boolean(),
- /**
- * Sets a size limit for outputs on the upscaling tab. This is a maximum dimension, so the actual max number of pixels
- * will be the square of this value.
- */
- maxUpscaleDimension: z.number().optional(),
- allowPrivateBoards: z.boolean(),
- allowPrivateStylePresets: z.boolean(),
- allowClientSideUpload: z.boolean(),
- allowPublishWorkflows: z.boolean(),
- allowPromptExpansion: z.boolean(),
- allowVideo: z.boolean(),
- disabledTabs: z.array(zTabName),
- disabledFeatures: z.array(zAppFeature),
- disabledSDFeatures: z.array(zSDFeature),
- nodesAllowlist: z.array(z.string()).optional(),
- nodesDenylist: z.array(z.string()).optional(),
- metadataFetchDebounce: z.number().int().optional(),
- workflowFetchDebounce: z.number().int().optional(),
- isLocal: z.boolean().optional(),
- shouldShowCredits: z.boolean().optional(),
- sd: z.object({
- defaultModel: z.string().optional(),
- disabledControlNetModels: z.array(z.string()),
- disabledControlNetProcessors: z.array(zFilterType),
- // Core parameters
- iterations: zNumericalParameterConfig,
- width: zNumericalParameterConfig,
- height: zNumericalParameterConfig,
- steps: zNumericalParameterConfig,
- guidance: zNumericalParameterConfig,
- cfgRescaleMultiplier: zNumericalParameterConfig,
- img2imgStrength: zNumericalParameterConfig,
- scheduler: zParameterScheduler.optional(),
- vaePrecision: zParameterPrecision.optional(),
- // Canvas
- boundingBoxHeight: zNumericalParameterConfig,
- boundingBoxWidth: zNumericalParameterConfig,
- scaledBoundingBoxHeight: zNumericalParameterConfig,
- scaledBoundingBoxWidth: zNumericalParameterConfig,
- canvasCoherenceStrength: zNumericalParameterConfig,
- canvasCoherenceEdgeSize: zNumericalParameterConfig,
- infillTileSize: zNumericalParameterConfig,
- infillPatchmatchDownscaleSize: zNumericalParameterConfig,
- // Misc advanced
- clipSkip: zNumericalParameterConfig, // slider and input max are ignored for this, because the values depend on the model
- maskBlur: zNumericalParameterConfig,
- hrfStrength: zNumericalParameterConfig,
- dynamicPrompts: z.object({
- maxPrompts: zNumericalParameterConfig,
- }),
- ca: z.object({
- weight: zNumericalParameterConfig,
- }),
- }),
- flux: z.object({
- guidance: zNumericalParameterConfig,
- }),
-});
-
-export type AppConfig = z.infer;
-export type PartialAppConfig = PartialDeep;
-
-export const getDefaultAppConfig = (): AppConfig => ({
- isLocal: true,
- shouldUpdateImagesOnConnect: false,
- shouldFetchMetadataFromApi: false,
- allowPrivateBoards: false,
- allowPrivateStylePresets: false,
- allowClientSideUpload: false,
- allowPublishWorkflows: false,
- allowPromptExpansion: false,
- allowVideo: false, // used to determine if video is enabled vs upsell
- shouldShowCredits: false,
- disabledTabs: ['video'], // used to determine if video functionality is visible
- disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[],
- disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[],
- sd: {
- disabledControlNetModels: [],
- disabledControlNetProcessors: [],
- iterations: {
- initial: 1,
- sliderMin: 1,
- sliderMax: 1000,
- numberInputMin: 1,
- numberInputMax: 10000,
- fineStep: 1,
- coarseStep: 1,
- },
- width: zNumericalParameterConfig.parse({}), // initial value comes from model
- height: zNumericalParameterConfig.parse({}), // initial value comes from model
- boundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model
- boundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model
- scaledBoundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model
- scaledBoundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model
- scheduler: 'dpmpp_3m_k' as const,
- vaePrecision: 'fp32' as const,
- steps: {
- initial: 30,
- sliderMin: 1,
- sliderMax: 100,
- numberInputMin: 1,
- numberInputMax: 500,
- fineStep: 1,
- coarseStep: 1,
- },
- guidance: {
- initial: 7,
- sliderMin: 1,
- sliderMax: 20,
- numberInputMin: 1,
- numberInputMax: 200,
- fineStep: 0.1,
- coarseStep: 0.5,
- },
- img2imgStrength: {
- initial: 0.7,
- sliderMin: 0,
- sliderMax: 1,
- numberInputMin: 0,
- numberInputMax: 1,
- fineStep: 0.01,
- coarseStep: 0.05,
- },
- canvasCoherenceStrength: {
- initial: 0.3,
- sliderMin: 0,
- sliderMax: 1,
- numberInputMin: 0,
- numberInputMax: 1,
- fineStep: 0.01,
- coarseStep: 0.05,
- },
- hrfStrength: {
- initial: 0.45,
- sliderMin: 0,
- sliderMax: 1,
- numberInputMin: 0,
- numberInputMax: 1,
- fineStep: 0.01,
- coarseStep: 0.05,
- },
- canvasCoherenceEdgeSize: {
- initial: 16,
- sliderMin: 0,
- sliderMax: 128,
- numberInputMin: 0,
- numberInputMax: 1024,
- fineStep: 8,
- coarseStep: 16,
- },
- cfgRescaleMultiplier: {
- initial: 0,
- sliderMin: 0,
- sliderMax: 0.99,
- numberInputMin: 0,
- numberInputMax: 0.99,
- fineStep: 0.05,
- coarseStep: 0.1,
- },
- clipSkip: {
- initial: 0,
- sliderMin: 0,
- sliderMax: 12, // determined by model selection, unused in practice
- numberInputMin: 0,
- numberInputMax: 12, // determined by model selection, unused in practice
- fineStep: 1,
- coarseStep: 1,
- },
- infillPatchmatchDownscaleSize: {
- initial: 1,
- sliderMin: 1,
- sliderMax: 10,
- numberInputMin: 1,
- numberInputMax: 10,
- fineStep: 1,
- coarseStep: 1,
- },
- infillTileSize: {
- initial: 32,
- sliderMin: 16,
- sliderMax: 64,
- numberInputMin: 16,
- numberInputMax: 256,
- fineStep: 1,
- coarseStep: 1,
- },
- maskBlur: {
- initial: 16,
- sliderMin: 0,
- sliderMax: 128,
- numberInputMin: 0,
- numberInputMax: 512,
- fineStep: 1,
- coarseStep: 1,
- },
- ca: {
- weight: {
- initial: 1,
- sliderMin: 0,
- sliderMax: 2,
- numberInputMin: -1,
- numberInputMax: 2,
- fineStep: 0.01,
- coarseStep: 0.05,
- },
- },
- dynamicPrompts: {
- maxPrompts: {
- initial: 100,
- sliderMin: 1,
- sliderMax: 1000,
- numberInputMin: 1,
- numberInputMax: 10000,
- fineStep: 1,
- coarseStep: 10,
- },
- },
- },
- flux: {
- guidance: {
- initial: 4,
- sliderMin: 2,
- sliderMax: 6,
- numberInputMin: 1,
- numberInputMax: 20,
- fineStep: 0.1,
- coarseStep: 0.5,
- },
- },
-});
diff --git a/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts b/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts
index 22a813b6de1..6db4dcbd682 100644
--- a/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts
+++ b/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts
@@ -1,5 +1,4 @@
import type { PopoverProps } from '@invoke-ai/ui-library';
-import commercialLicenseBg from 'public/assets/images/commercial-license-bg.png';
import denoisingStrength from 'public/assets/images/denoising-strength.png';
export type Feature =
@@ -217,10 +216,6 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
seamlessTilingYAxis: {
href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
},
- fluxDevLicense: {
- href: 'https://www.invoke.com/get-a-commercial-license-for-flux',
- image: commercialLicenseBg,
- },
} as const;
export const OPEN_DELAY = 1000; // in milliseconds
diff --git a/invokeai/frontend/web/src/common/hooks/focus.ts b/invokeai/frontend/web/src/common/hooks/focus.ts
index 8a04608a13d..4e093c5c631 100644
--- a/invokeai/frontend/web/src/common/hooks/focus.ts
+++ b/invokeai/frontend/web/src/common/hooks/focus.ts
@@ -37,7 +37,6 @@ const REGION_NAMES = [
'workflows',
'progress',
'settings',
- 'video',
] as const;
/**
* The names of the focus regions.
diff --git a/invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts b/invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts
deleted file mode 100644
index f5cc5a7f3f2..00000000000
--- a/invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts
+++ /dev/null
@@ -1,121 +0,0 @@
-import { useStore } from '@nanostores/react';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { imageUploadedClientSide } from 'features/gallery/store/actions';
-import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
-import { useCallback } from 'react';
-import { useCreateImageUploadEntryMutation } from 'services/api/endpoints/images';
-import type { ImageDTO } from 'services/api/types';
-
-type PresignedUrlResponse = {
- fullUrl: string;
- thumbnailUrl: string;
-};
-
-const isPresignedUrlResponse = (response: unknown): response is PresignedUrlResponse => {
- return typeof response === 'object' && response !== null && 'fullUrl' in response && 'thumbnailUrl' in response;
-};
-
-export const useClientSideUpload = () => {
- const dispatch = useAppDispatch();
- const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
- const authToken = useStore($authToken);
- const [createImageUploadEntry] = useCreateImageUploadEntryMutation();
-
- const clientSideUpload = useCallback(
- async (file: File, i: number): Promise => {
- const image = new Image();
- const objectURL = URL.createObjectURL(file);
- image.src = objectURL;
- let width = 0;
- let height = 0;
- let thumbnail: Blob | undefined;
-
- await new Promise((resolve) => {
- image.onload = () => {
- width = image.naturalWidth;
- height = image.naturalHeight;
-
- // Calculate thumbnail dimensions maintaining aspect ratio
- let thumbWidth = width;
- let thumbHeight = height;
- if (width > height && width > 256) {
- thumbWidth = 256;
- thumbHeight = Math.round((height * 256) / width);
- } else if (height > 256) {
- thumbHeight = 256;
- thumbWidth = Math.round((width * 256) / height);
- }
-
- const canvas = document.createElement('canvas');
- canvas.width = thumbWidth;
- canvas.height = thumbHeight;
- const ctx = canvas.getContext('2d');
- ctx?.drawImage(image, 0, 0, thumbWidth, thumbHeight);
-
- canvas.toBlob(
- (blob) => {
- if (blob) {
- thumbnail = blob;
- // Clean up resources
- URL.revokeObjectURL(objectURL);
- image.src = ''; // Clear image source
- image.remove(); // Remove the image element
- canvas.width = 0; // Clear canvas
- canvas.height = 0;
- resolve();
- }
- },
- 'image/webp',
- 0.8
- );
- };
-
- // Handle load errors
- image.onerror = () => {
- URL.revokeObjectURL(objectURL);
- image.remove();
- resolve();
- };
- });
- const { presigned_url, image_dto } = await createImageUploadEntry({
- width,
- height,
- board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
- }).unwrap();
-
- const response = await fetch(presigned_url, {
- method: 'GET',
- ...(authToken && {
- headers: {
- Authorization: `Bearer ${authToken}`,
- },
- }),
- }).then((res) => res.json());
-
- if (!isPresignedUrlResponse(response)) {
- throw new Error('Invalid response');
- }
-
- const fullUrl = response.fullUrl;
- const thumbnailUrl = response.thumbnailUrl;
-
- await fetch(fullUrl, {
- method: 'PUT',
- body: file,
- });
-
- await fetch(thumbnailUrl, {
- method: 'PUT',
- body: thumbnail,
- });
-
- dispatch(imageUploadedClientSide({ imageDTO: image_dto, silent: false, isFirstUploadOfBatch: i === 0 }));
-
- return image_dto;
- },
- [autoAddBoardId, authToken, createImageUploadEntry, dispatch]
- );
-
- return clientSideUpload;
-};
diff --git a/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts b/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts
index d90ec3f2ed1..e46227b2f5d 100644
--- a/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts
+++ b/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts
@@ -1,7 +1,5 @@
-import { useAppDispatch } from 'app/store/storeHooks';
import { useClipboard } from 'common/hooks/useClipboard';
import { convertImageUrlToBlob } from 'common/util/convertImageUrlToBlob';
-import { imageCopiedToClipboard } from 'features/gallery/store/actions';
import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -9,7 +7,6 @@ import { useTranslation } from 'react-i18next';
export const useCopyImageToClipboard = () => {
const { t } = useTranslation();
const clipboard = useClipboard();
- const dispatch = useAppDispatch();
const copyImageToClipboard = useCallback(
async (image_url: string) => {
@@ -26,7 +23,6 @@ export const useCopyImageToClipboard = () => {
title: t('toast.imageCopied'),
status: 'success',
});
- dispatch(imageCopiedToClipboard());
});
} catch (err) {
toast({
@@ -37,7 +33,7 @@ export const useCopyImageToClipboard = () => {
});
}
},
- [clipboard, t, dispatch]
+ [clipboard, t]
);
return copyImageToClipboard;
diff --git a/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts b/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts
index 1309afdbf56..33b90e1d7fe 100644
--- a/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts
+++ b/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts
@@ -1,27 +1,14 @@
-import { useStore } from '@nanostores/react';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { useAppDispatch } from 'app/store/storeHooks';
-import { imageDownloaded } from 'features/gallery/store/actions';
import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const useDownloadItem = () => {
const { t } = useTranslation();
- const dispatch = useAppDispatch();
- const authToken = useStore($authToken);
const downloadItem = useCallback(
async (item_url: string, item_id: string) => {
try {
- const requestOpts = authToken
- ? {
- headers: {
- Authorization: `Bearer ${authToken}`,
- },
- }
- : {};
- const blob = await fetch(item_url, requestOpts).then((resp) => resp.blob());
+ const blob = await fetch(item_url).then((resp) => resp.blob());
if (!blob) {
throw new Error('Unable to create Blob');
}
@@ -34,7 +21,6 @@ export const useDownloadItem = () => {
document.body.appendChild(a);
a.click();
window.URL.revokeObjectURL(url);
- dispatch(imageDownloaded());
} catch (err) {
toast({
id: 'PROBLEM_DOWNLOADING_IMAGE',
@@ -44,7 +30,7 @@ export const useDownloadItem = () => {
});
}
},
- [t, dispatch, authToken]
+ [t]
);
return { downloadItem };
diff --git a/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts b/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts
index a3ccdc01f2a..dd43c0b0947 100644
--- a/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts
+++ b/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts
@@ -1,20 +1,16 @@
import { useAppStore } from 'app/store/storeHooks';
import { useDeleteImageModalApi } from 'features/deleteImageModal/store/state';
-import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state';
import { selectSelection } from 'features/gallery/store/gallerySelectors';
import { useClearQueue } from 'features/queue/hooks/useClearQueue';
import { useDeleteCurrentQueueItem } from 'features/queue/hooks/useDeleteCurrentQueueItem';
import { useInvoke } from 'features/queue/hooks/useInvoke';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { getFocusedRegion } from './focus';
export const useGlobalHotkeys = () => {
const { dispatch, getState } = useAppStore();
- const isVideoEnabled = useFeatureStatus('video');
- const isModelManagerEnabled = useFeatureStatus('modelManager');
const queue = useInvoke();
useRegisteredHotkeys({
@@ -94,18 +90,6 @@ export const useGlobalHotkeys = () => {
dependencies: [dispatch],
});
- useRegisteredHotkeys({
- id: 'selectVideoTab',
- category: 'app',
- callback: () => {
- navigationApi.switchToTab('video');
- },
- options: {
- enabled: isVideoEnabled,
- },
- dependencies: [dispatch],
- });
-
useRegisteredHotkeys({
id: 'selectWorkflowsTab',
category: 'app',
@@ -121,10 +105,7 @@ export const useGlobalHotkeys = () => {
callback: () => {
navigationApi.switchToTab('models');
},
- options: {
- enabled: isModelManagerEnabled,
- },
- dependencies: [dispatch, isModelManagerEnabled],
+ dependencies: [dispatch],
});
useRegisteredHotkeys({
@@ -133,11 +114,10 @@ export const useGlobalHotkeys = () => {
callback: () => {
navigationApi.switchToTab('queue');
},
- dependencies: [dispatch, isModelManagerEnabled],
+ dependencies: [dispatch],
});
const deleteImageModalApi = useDeleteImageModalApi();
- const deleteVideoModalApi = useDeleteVideoModalApi();
useRegisteredHotkeys({
id: 'deleteSelection',
@@ -151,13 +131,7 @@ export const useGlobalHotkeys = () => {
if (!selection.length) {
return;
}
- if (selection.every(({ type }) => type === 'image')) {
- deleteImageModalApi.delete(selection.map((s) => s.id));
- } else if (selection.every(({ type }) => type === 'video')) {
- deleteVideoModalApi.delete(selection.map((s) => s.id));
- } else {
- // no-op, we expect selections to always be only images or only video
- }
+ deleteImageModalApi.delete(selection);
},
dependencies: [getState, deleteImageModalApi],
});
diff --git a/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx b/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx
index 9a06dab6398..445f58c9a66 100644
--- a/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx
+++ b/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx
@@ -3,7 +3,6 @@ import { Button, IconButton } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
-import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import { memo, useCallback } from 'react';
import type { Accept, FileRejection } from 'react-dropzone';
@@ -27,7 +26,6 @@ export const dropzoneAccept: Accept = {
'image/webp': ['.webp'].reduce(addUpperCaseReducer, [] as string[]),
};
-import { useClientSideUpload } from './useClientSideUpload';
type UseImageUploadButtonArgs =
| {
isDisabled?: boolean;
@@ -73,9 +71,7 @@ export const useImageUploadButton = ({
onError,
}: UseImageUploadButtonArgs) => {
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
- const isClientSideUploadEnabled = useAppSelector(selectIsClientSideUploadEnabled);
const [uploadImage, request] = useUploadImageMutation();
- const clientSideUpload = useClientSideUpload();
const { t } = useTranslation();
const onDropAccepted = useCallback(
@@ -108,20 +104,16 @@ export const useImageUploadButton = ({
onUploadStarted?.(files);
let imageDTOs: ImageDTO[] = [];
- if (isClientSideUploadEnabled && files.length > 1) {
- imageDTOs = await Promise.all(files.map((file, i) => clientSideUpload(file, i)));
- } else {
- imageDTOs = await uploadImages(
- files.map((file, i) => ({
- file,
- image_category: 'user',
- is_intermediate: false,
- board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
- silent: false,
- isFirstUploadOfBatch: i === 0,
- }))
- );
- }
+ imageDTOs = await uploadImages(
+ files.map((file, i) => ({
+ file,
+ image_category: 'user',
+ is_intermediate: false,
+ board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
+ silent: false,
+ isFirstUploadOfBatch: i === 0,
+ }))
+ );
if (onUpload) {
onUpload(imageDTOs);
}
@@ -135,17 +127,7 @@ export const useImageUploadButton = ({
});
}
},
- [
- allowMultiple,
- onUploadStarted,
- uploadImage,
- autoAddBoardId,
- onUpload,
- isClientSideUploadEnabled,
- clientSideUpload,
- onError,
- t,
- ]
+ [allowMultiple, onUploadStarted, uploadImage, autoAddBoardId, onUpload, onError, t]
);
const onDropRejected = useCallback(
diff --git a/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts b/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts
index 5d1ff434bc9..69816bd0284 100644
--- a/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts
+++ b/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts
@@ -1,5 +1,3 @@
-import { $authToken } from 'app/store/nanostores/authToken';
-
/**
* Converts an image URL to a Blob by creating an
element, drawing it to canvas
* and then converting the canvas to a Blob.
@@ -40,6 +38,6 @@ export const convertImageUrlToBlob = (url: string) =>
reject(new Error('Image failed to load. The URL may be invalid or the object may not exist.'));
};
- img.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
+ img.crossOrigin = 'anonymous';
img.src = url;
});
diff --git a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
index 3413a38e520..00217eb7963 100644
--- a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
+++ b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
@@ -13,18 +13,12 @@ import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
import { useAddImagesToBoardMutation, useRemoveImagesFromBoardMutation } from 'services/api/endpoints/images';
-import { useAddVideosToBoardMutation, useRemoveVideosFromBoardMutation } from 'services/api/endpoints/videos';
const selectImagesToChange = createSelector(
selectChangeBoardModalSlice,
(changeBoardModal) => changeBoardModal.image_names
);
-const selectVideosToChange = createSelector(
- selectChangeBoardModalSlice,
- (changeBoardModal) => changeBoardModal.video_ids
-);
-
const selectIsModalOpen = createSelector(
selectChangeBoardModalSlice,
(changeBoardModal) => changeBoardModal.isModalOpen
@@ -38,11 +32,8 @@ const ChangeBoardModal = () => {
const { data: boards, isFetching } = useListAllBoardsQuery({ include_archived: true });
const isModalOpen = useAppSelector(selectIsModalOpen);
const imagesToChange = useAppSelector(selectImagesToChange);
- const videosToChange = useAppSelector(selectVideosToChange);
const [addImagesToBoard] = useAddImagesToBoardMutation();
const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation();
- const [addVideosToBoard] = useAddVideosToBoardMutation();
- const [removeVideosFromBoard] = useRemoveVideosFromBoardMutation();
const { t } = useTranslation();
const options = useMemo(() => {
@@ -66,7 +57,7 @@ const ChangeBoardModal = () => {
}, [dispatch]);
const handleChangeBoard = useCallback(() => {
- if (!selectedBoardId || (imagesToChange.length === 0 && videosToChange.length === 0)) {
+ if (!selectedBoardId || imagesToChange.length === 0) {
return;
}
@@ -80,27 +71,8 @@ const ChangeBoardModal = () => {
});
}
}
- if (videosToChange.length) {
- if (selectedBoardId === 'none') {
- removeVideosFromBoard({ video_ids: videosToChange });
- } else {
- addVideosToBoard({
- video_ids: videosToChange,
- board_id: selectedBoardId,
- });
- }
- }
dispatch(changeBoardReset());
- }, [
- addImagesToBoard,
- dispatch,
- imagesToChange,
- videosToChange,
- removeImagesFromBoard,
- selectedBoardId,
- addVideosToBoard,
- removeVideosFromBoard,
- ]);
+ }, [addImagesToBoard, dispatch, imagesToChange, removeImagesFromBoard, selectedBoardId]);
const onChange = useCallback((v) => {
if (!v) {
@@ -121,15 +93,9 @@ const ChangeBoardModal = () => {
>
- {imagesToChange.length > 0 &&
- t('boards.movingImagesToBoard', {
- count: imagesToChange.length,
- })}
- {videosToChange.length > 0 &&
- t('boards.movingVideosToBoard', {
- count: videosToChange.length,
- })}
- :
+ {t('boards.movingImagesToBoard', {
+ count: imagesToChange.length,
+ })}
[]),
- video_ids: z.array(z.string()).default(() => []),
});
type ChangeBoardModalState = z.infer;
@@ -23,9 +22,6 @@ const slice = createSlice({
imagesToChangeSelected: (state, action: PayloadAction) => {
state.image_names = action.payload;
},
- videosToChangeSelected: (state, action: PayloadAction) => {
- state.video_ids = action.payload;
- },
changeBoardReset: (state) => {
state.image_names = [];
state.isModalOpen = false;
@@ -33,7 +29,7 @@ const slice = createSlice({
},
});
-export const { isModalOpenChanged, imagesToChangeSelected, videosToChangeSelected, changeBoardReset } = slice.actions;
+export const { isModalOpenChanged, imagesToChangeSelected, changeBoardReset } = slice.actions;
export const selectChangeBoardModalSlice = (state: RootState) => state.changeBoardModal;
diff --git a/invokeai/frontend/web/src/features/controlLayers/README.md b/invokeai/frontend/web/src/features/controlLayers/README.md
new file mode 100644
index 00000000000..de2aafee13a
--- /dev/null
+++ b/invokeai/frontend/web/src/features/controlLayers/README.md
@@ -0,0 +1,228 @@
+# Canvas
+
+The canvas is a fairly complex feature. It uses "native" KonvaJS (i.e. not the Konva react bindings) to render a drawing canvas.
+
+It supports layers, drawing, erasing, undo/redo, exporting, backend filters (i.e. filters that require sending image data to the backend to process) and frontend filters.
+
+## Broad Strokes of Design
+
+The canvas is internally a hierarchy of classes (modules). All canvas modules inherit from invokeai/frontend/web/src/features/controlLayers/konva/CanvasModuleBase.ts
+
+### Modules
+
+The top-level module is the CanvasManager: invokeai/frontend/web/src/features/controlLayers/konva/CanvasManager.ts
+
+All canvas modules have:
+
+- A unique id (per instance)
+- A ref to its parent module and the canvas manager (the top-level Manager refs itself)
+- A repr() method that returns a plain JS object representing the module instance
+- A destroy() method to clean up resources
+- A log() method that auto-injects context for the module instance
+
+Modules can do anything, they are simply plain-JS classes to encapsulate some functionality. Some are singletons. Some examples:
+
+- A singleton module that handles tool-specific interactions: invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasToolModule.ts
+- Singleton modules for each tool e.g. the CanvasBrushToolModule: invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBrushToolModule.ts
+- A singleton module to render the background of the canvas: invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts
+- A strictly logical module that manages various caches of image data: invokeai/frontend/web/src/features/controlLayers/konva/CanvasCacheModule.ts
+- A non-singleton module that handles rendering a brush stroke: invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectBrushLine.ts
+
+### Layers (Entities) and Adapters modules
+
+Canvas has a number of layer types:
+
+- Raster layers: Traditional raster/pixel layers, much like layers in Photoshop
+- Control layers: Internally a raster layer, but designated to hold control data (e.g. depth maps, segmentation masks, etc.) and have special rendering rules
+- Regional guidance layers: A mask-like layer (i.e. it has arbitrary shapes but they have no color or texture, it's just a mask region) plus conditioning data like prompts or ref images. The conditioning is applied only to the masked regions
+- Inpaint mask layers: Another mask-like layer that indicate regions to inpaint/regenerate
+
+Instances of layers are called "entities" in the codebase. Each entity has a type (one of the above), a number of properties (e.g. visibility, opacity, etc.), objects (e.g. brush strokes, shapes, images) and possibly other data.
+
+Each layer type has a corresponding "adapter" module that handles rendering the layer and its objects, applying filters, etc. The adapter modules are non-singleton modules that are instantiated once per layer entity.
+
+Using the raster layer type as an example, it has a number of sub-modules:
+
+- A top-level module that coordinates everything: invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer.ts
+- An object (e.g. brush strokes, shapes, images) renderer that draws the layer via Konva: invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts
+- A "buffer" object renderer, which renders in-progress objects (e.g. a brush stroke that is being drawn but not yet committed, important for performance): invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer.ts
+- A module that handles previewing and applying backend filters: invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer.ts
+- A module that handles selecting objects from the pixel data of a layer (aka segmentation tasks): invokeai/frontend/web/src/features/controlLayers/konva/CanvasSegmentAnythingModule.ts
+- A module that handles transforming the layer (scale, translate, rotate): invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer.ts
+
+## State mgmt
+
+This gets a bit hairy. We have a mix of redux, Konva and nanostores.
+
+At a high level, we use observable/listener patterns to react to state changes and propagate them to where they need to go.
+
+### Redux
+
+Redux is the source of truth for _persistent_ canvas state - layers, their order, etc.
+
+The redux API includes:
+
+- getState(): Get the entire redux state
+- subscribe(listener): Subscribe to state changes, listener is called on _every_ state change, no granularity is provided
+- dispatch(action): Dispatch an action to change state
+
+Redux is not suitable for _transient_ state that changes frequently, e.g. the current brush stroke as the user is drawing it. Syncing every change to redux would be too slow and incur a significant performance penalty that would drop FPS too much.
+
+Canvas modules that have persistent state (e.g. layers, their properties, etc.) use redux to store that state and will subscribe to redux to listen for changes and update themselves as needed.
+
+### Konva
+
+Konva's API is imperative (i.e. you call methods on the Konva nodes to change them) but it renders automatically.
+
+There is no simple way to "subscribe" to changes in Konva nodes. You can listen to certain events (e.g. dragmove, transform, etc.) but there is no generic "node changed" event.
+
+So we almost exclusively push data to Konva, we never "read" from it.
+
+### Nanostores
+
+We use https://github.com/nanostores/nanostores as a lightweight observable state management solution. Nanostores has a plain-JS listener API for subscribing to changes, similar to redux's subscribe(). And it has react bindings so we can use it in react components.
+
+Modules often use nanostores to store their internal state, especially when that state needs to be observed by other modules or react components.
+
+For example, the CanvasToolModule uses a nanostore to hold the current tool (brush, eraser, etc.) and its options (brush size, color, etc.). React components can subscribe to that store to update their UI when the tool or its options change.
+
+So this provides a simple two-way binding between canvas modules and react components.
+
+### State -> Canvas
+
+Data may flow from redux state to Canvas. For example, on canvas init we render all layers and their objects from redux state in Konva:
+
+- Create the layer's entity adapter and all sub-modules
+- Iterate over the layer's objects and create a module instance for each object (e.g. brush stroke, shape, image)
+- Each object module creates the necessary Konva nodes to represent itself and adds them to the layer
+
+The entity adapter subscribes to redux to listen for state changes and pass on the updated state to its sub-modules so they can do whatever they need to do w/ the updated state.
+
+Besides the initial render, we might have to update the Konva representation of a layer when:
+
+- The layer's properties are changed (e.g. visibility, opacity, etc.)
+- The layer's order is changed (e.g. move up/down)
+- User does an undo/redo operation that affects the layer
+- The layer is deleted
+
+### Canvas -> State
+
+When the user interacts w/ the canvas (e.g. draws a brush stroke, erases, moves an object, etc.), we create/update/delete objects in Konva. When the user finishes the interaction (e.g. finishes drawing a brush stroke), we serialize the object to a plain JS object and dispatch a redux action to add the object in redux state.
+
+Using drawing a line on a raster layer as an example, the flow is:
+
+- User initiates a brush stroke and draws
+- We create a brush line object module instance in the layer's buffer renderer
+- The brush line object is given a unique ID
+- The brush line mod creates a Konva.Line node to represent the stroke
+- The brush line mod tracks the stroke as the user draws, updating the Konva.Line node as needed, all in the buffer renderer
+- When the user finishes the stroke, the brush line module transfers control of itself from the layer's buffer renderer to its main renderer
+- As the line is marked complete, the line data is serialized to a plain JS object (i.e. array of points and color) and we dispatch a redux action to add the line object to the layer entity in redux state
+
+Besides drawing tasks, we have similar flows for:
+
+- Transforming a layer (scale, translate, rotate)
+- Filtering a layer
+- Selecting objects from a layer (segmentation tasks)
+
+## Erasing is hard
+
+HTML Canvas has a limited set of compositing modes. These apply globally to the whole canvas element. There is no "local" compositing mode that applies only to a specific shape or object. There is no concept of layers.
+
+So to implement erasing (and opacity!), we have to get creative. Konva handles much of this for us. Each layer is represented internally by a Konva.Layer, which in turn is drawn to its own HTML Canvas element.
+
+Erasing is accomplished by using a globalCompositeOperation of "destination-out" on the brush stroke that is doing the erasing. The brush stroke "cuts a hole" in the layer it is drawn on.
+
+There is a complication. The UX for erasing a layer should be:
+
+- User has a layer, let's say it has an image on it
+- The layer's size is exactly the size of the image
+- User erases the right-hand half of the image
+- The layer's size shrinks to fit the remaining content, i.e. the left half of the image
+- If the user transforms the layer (scale, translate, rotate), the transformations apply only to the remaining content
+
+But the "destination-out" compositing mode only makes the erased pixels transparent. It does not actually remove them from the layer. The layer's bounding box includes the eraser strokes - even though they are transparent. The eraser strokes can actually _enlarge_ the layer's bounding box if the user erases outside the original bounds of the layer.
+
+So, we need a way to calculate the _visual_ bounds of the layer, i.e. the bounding box of all non-transparent pixels. We do this by rendering the layer to an offscreen canvas and reading back the pixel data to calculate the bounds. This process is costly, and we offload some of the work to a web worker to avoid blocking the main thread. Nevertheless, just getting that pixel data is expensive, scaling to the size of the layer.
+
+The usage of the buffer renderer module helps a lot here, as we only need to recalc the bounds when the user finishes a drawing action, not while they are drawing it.
+
+You'll see the relevant code for this in the transformer module. It encapsulates the bounds calculation logic and exposes an observable that holds the last-known visual bounds of the layer.
+
+The worker entrypoint is here invokeai/frontend/web/src/features/controlLayers/konva/CanvasWorkerModule.ts
+
+## Rasterizing layers
+
+Layers consist of a mix of vector and pixel data. For example, a brush stroke is a vector (i.e. array of points) and an image is pixel data.
+
+Ideally we could go straight from user input to pixel data, but this is not feasible for performance reasons. We'd need to write the images to an offscreen canvas, read back the pixel data, send it to the backend, get back the processed pixel data, write it to an offscreen canvas, then read back the pixel data again to update the layer. This would be too slow and block the main thread too much.
+
+So we use a hybrid approach. We keep the vector data in memory and render it to pixel data only when needed, e.g. when the user applies a backend filter or does a transformation on the canvas.
+
+This is unfortunately complicated but we couldn't figure out a more performant way to handle this.
+
+## Compositing layers to prepare for generation
+
+The canvas is a means to an end: provide strong user control and agency for image generation.
+
+When generating an image, the raster layers must be composited together into a single image that is sent to the backend. All inpaint masks are similarly composited together into a single mask image. Regional guidance and control layers are not composited together, they are sent as individual images.
+
+This is handled in invokeai/frontend/web/src/features/controlLayers/konva/CanvasCompositorModule.ts
+
+For each compositing task, the compositor creates a unique hash of the layer's state (e.g. objects, properties, etc.) and uses that to cache the resulting composited image's name (which is a unique reference to the image file stored on disk). This avoids re-compositing layers that haven't changed since the last generation.
+
+## The generation bounding box
+
+Image generation models can only generate images up to certain sizes without causing VRAM OOMs. So we need to give the user a way to specify the size of the generation area. This is done via the "generation bounding box" tool, which is a rectangle that the user can resize and move around the canvas.
+
+Here's the module for it invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts
+
+Models all have width/height constraints - they must be multiples of a certain number (typically 8, 16 or 32). This is related to the internal "latents" representation of images in diffusion models. So the generation bbox must be constrained to these multiples.
+
+## Staging generations
+
+The typical use pattern for generating images on canvas is to generate a number of variations and pick one or more to keep. This is supported via the "staging area", which is a horizontal strip of image thumbnails below the canvas. These staged images are rendered via React, not Konva.
+
+Once canvas generation starts, much of the canvas is locked down until the user finalizes the staging area, either by accepting a single image, adding one or more images as new layers, or discarding all staged images.
+
+The currently-selected staged image is previewed on the canvas and rendered via invokeai/frontend/web/src/features/controlLayers/konva/CanvasStagingAreaModule.ts
+
+When the user accepts a staged image, it is added as a new raster layer (there are other options for adding as control, saving directly to gallery, etc).
+
+This subsystem tracks generated images by watching the queue of generation tasks. The relevant code for queue tracking is in invokeai/frontend/web/src/features/controlLayers/components/StagingArea/state.ts
+
+## Future enhancements
+
+### Perf: Reduce the number of canvas elements
+
+Each layer has a Konva.Layer which has its own canvas element. Once you get too many of these, the browser starts to struggle.
+
+One idea to improve this would be to have a 3-layer system:
+
+- The active layer is its own Konva.Layer
+- All layers behind it are flattened into a single Konva.Layer
+- All layers in front of it are flattened into a single Konva.Layer
+
+When the user switches the active layer, we re-flatten the layers as needed. This would reduce the number of canvas elements to 3 regardless of how many layers there are. This would greatly improve performance, especially on lower-end devices.
+
+### Perf: Konva in a web worker
+
+All of the heavy konva rendering could be offloaded to a web worker. This would free up the main thread for user interactions and UI updates. The main thread would send user input and state changes to the worker, and the worker would send back rendered images to display.
+
+There used to be a hacky example of this on the Konva docs but I can't find it as of this writing. It requires proxying mouse and keyboard events to the worker, but wasn't too complicated. This could be a _huge_ perf win.
+
+### Abstract state bindings
+
+Currently the state bindings (redux, nanostores) are all over the place. There is a singleton module that handles much of the redux binding, but it's still a bit messy: invokeai/frontend/web/src/features/controlLayers/konva/CanvasStateApiModule.ts
+
+Many modules still directly subscribe to redux with their own selectors.
+
+Ideally we could have a more abstracted state binding system that could handle multiple backends (e.g. redux, nanostores, etc.) in a more uniform way. This would make it easier to manage state and reduce boilerplate code.
+
+### Do not lock down canvas as much during staging
+
+Currently, once the user starts generating images, much of the canvas is locked down until the user finalizes the staging area. This can be frustrating if the user wants to make small adjustments to layers or settings while previewing staged images, but it prevents footguns.
+
+For example, if the user changes the generation bbox size while staging, then queues up more generations, the output images may not match the bbox size, leading to confusion.
+
+It's more locked-down than it needs to be. Theoretically, most of the canvas could be interactive while staging. Just needs some careful thought to not be too confusing.
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx
index 4e9dd5ec512..eb2a043864b 100644
--- a/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx
+++ b/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx
@@ -2,7 +2,6 @@ import { Alert, AlertDescription, AlertIcon, AlertTitle } from '@invoke-ai/ui-li
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useDeferredModelLoadingInvocationProgressMessage } from 'features/controlLayers/hooks/useDeferredModelLoadingInvocationProgressMessage';
-import { selectIsLocal } from 'features/system/store/configSlice';
import { selectSystemShouldShowInvocationProgressDetail } from 'features/system/store/systemSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -44,13 +43,8 @@ CanvasAlertsInvocationProgressContentCommercial.displayName = 'CanvasAlertsInvoc
export const CanvasAlertsInvocationProgress = memo(() => {
const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
- const isLocal = useAppSelector(selectIsLocal);
- if (!isLocal) {
- return ;
- }
-
- // OSS user setting
+ // user setting
if (!shouldShowInvocationProgressDetail) {
return null;
}
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx b/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx
index a21c303d13e..2b3ff537026 100644
--- a/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx
+++ b/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx
@@ -1,18 +1,13 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
-import { includes, map } from 'es-toolkit/compat';
+import { map } from 'es-toolkit/compat';
import type { FilterConfig } from 'features/controlLayers/store/filters';
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/filters';
-import { selectConfigSlice } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';
-const selectDisabledProcessors = createSelector(selectConfigSlice, (config) => config.sd.disabledControlNetProcessors);
-
type Props = {
filterType: FilterConfig['type'];
onChange: (filterType: FilterConfig['type']) => void;
@@ -20,12 +15,9 @@ type Props = {
export const FilterTypeSelect = memo(({ filterType, onChange }: Props) => {
const { t } = useTranslation();
- const disabledProcessors = useAppSelector(selectDisabledProcessors);
const options = useMemo(() => {
- return map(IMAGE_FILTERS, (data, type) => ({ value: type, label: t(`controlLayers.filter.${type}.label`) })).filter(
- (o) => !includes(disabledProcessors, o.value)
- );
- }, [disabledProcessors, t]);
+ return map(IMAGE_FILTERS, (data, type) => ({ value: type, label: t(`controlLayers.filter.${type}.label`) }));
+ }, [t]);
const _onChange = useCallback(
(v) => {
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx
index 49a289b875c..34fb96f063d 100644
--- a/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx
+++ b/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx
@@ -13,12 +13,21 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf
import WavyLine from 'common/components/WavyLine';
import { selectImg2imgStrength, setImg2imgStrength } from 'features/controlLayers/store/paramsSlice';
import { selectActiveRasterLayerEntities } from 'features/controlLayers/store/selectors';
-import { selectImg2imgStrengthConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelectedModelConfig } from 'services/api/hooks/useSelectedModelConfig';
import { isFluxFillMainModelModelConfig } from 'services/api/types';
+const CONSTRAINTS = {
+ initial: 0.7,
+ sliderMin: 0,
+ sliderMax: 1,
+ numberInputMin: 0,
+ numberInputMax: 1,
+ fineStep: 0.01,
+ coarseStep: 0.05,
+};
+
const selectHasRasterLayersWithContent = createSelector(
selectActiveRasterLayerEntities,
(entities) => entities.length > 0
@@ -37,7 +46,6 @@ export const ParamDenoisingStrength = memo(() => {
[dispatch]
);
- const config = useAppSelector(selectImg2imgStrengthConfig);
const { t } = useTranslation();
const [invokeBlue300] = useToken('colors', ['invokeBlue.300']);
@@ -67,20 +75,20 @@ export const ParamDenoisingStrength = memo(() => {
{!isDisabled ? (
<>
void;
@@ -14,11 +21,8 @@ type Props = {
const formatValue = (v: number) => v.toFixed(2);
const marks = [0, 1, 2];
-const selectWeightConfig = createSelector(selectConfigSlice, (config) => config.sd.ca.weight);
-
export const Weight = memo(({ weight, onChange }: Props) => {
const { t } = useTranslation();
- const config = useAppSelector(selectWeightConfig);
return (
@@ -28,23 +32,23 @@ export const Weight = memo(({ weight, onChange }: Props) => {
);
diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts
index 062937edcd0..fe23ec9d90b 100644
--- a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts
@@ -24,20 +24,16 @@ import {
import type {
CanvasEntityIdentifier,
CanvasRegionalGuidanceState,
- ChatGPT4oReferenceImageConfig,
ControlLoRAConfig,
ControlNetConfig,
FluxKontextReferenceImageConfig,
- Gemini2_5ReferenceImageConfig,
IPAdapterConfig,
RegionalGuidanceIPAdapterConfig,
T2IAdapterConfig,
} from 'features/controlLayers/store/types';
import {
- initialChatGPT4oReferenceImage,
initialControlNet,
initialFluxKontextReferenceImage,
- initialGemini2_5ReferenceImage,
initialIPAdapter,
initialRegionalGuidanceIPAdapter,
initialT2IAdapter,
@@ -78,13 +74,7 @@ export const selectDefaultControlAdapter = createSelector(
}
);
-export const getDefaultRefImageConfig = (
- getState: AppGetState
-):
- | IPAdapterConfig
- | ChatGPT4oReferenceImageConfig
- | FluxKontextReferenceImageConfig
- | Gemini2_5ReferenceImageConfig => {
+export const getDefaultRefImageConfig = (getState: AppGetState): IPAdapterConfig | FluxKontextReferenceImageConfig => {
const state = getState();
const mainModelConfig = selectMainModelConfig(state);
@@ -92,25 +82,12 @@ export const getDefaultRefImageConfig = (
const base = mainModelConfig?.base;
- // For ChatGPT-4o, the ref image model is the model itself.
- if (base === 'chatgpt-4o') {
- const config = deepClone(initialChatGPT4oReferenceImage);
- config.model = zModelIdentifierField.parse(mainModelConfig);
- return config;
- }
-
- if (base === 'flux-kontext' || (base === 'flux' && mainModelConfig?.name?.toLowerCase().includes('kontext'))) {
+ if (base === 'flux' && mainModelConfig?.name?.toLowerCase().includes('kontext')) {
const config = deepClone(initialFluxKontextReferenceImage);
config.model = zModelIdentifierField.parse(mainModelConfig);
return config;
}
- if (base === 'gemini-2.5') {
- const config = deepClone(initialGemini2_5ReferenceImage);
- config.model = zModelIdentifierField.parse(mainModelConfig);
- return config;
- }
-
// Otherwise, find the first compatible IP Adapter model.
const modelConfig = ipAdapterModelConfigs.find((m) => m.base === base);
diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts
index d31f18ad6c2..b852d119149 100644
--- a/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts
@@ -1,13 +1,5 @@
import { useAppSelector } from 'app/store/storeHooks';
-import {
- selectIsChatGPT4o,
- selectIsCogView4,
- selectIsFluxKontext,
- selectIsGemini2_5,
- selectIsImagen3,
- selectIsImagen4,
- selectIsSD3,
-} from 'features/controlLayers/store/paramsSlice';
+import { selectIsCogView4, selectIsFluxKontext, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import type { CanvasEntityType } from 'features/controlLayers/store/types';
import { useMemo } from 'react';
import type { Equals } from 'tsafe';
@@ -16,26 +8,24 @@ import { assert } from 'tsafe';
export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => {
const isSD3 = useAppSelector(selectIsSD3);
const isCogView4 = useAppSelector(selectIsCogView4);
- const isImagen3 = useAppSelector(selectIsImagen3);
- const isImagen4 = useAppSelector(selectIsImagen4);
const isFluxKontext = useAppSelector(selectIsFluxKontext);
- const isChatGPT4o = useAppSelector(selectIsChatGPT4o);
- const isGemini2_5 = useAppSelector(selectIsGemini2_5);
+ // TODO(psyche): consider using a constant to define which entity types are supported by which model,
+ // see invokeai/frontend/web/src/features/modelManagerV2/models.ts for ref
const isEntityTypeEnabled = useMemo(() => {
switch (entityType) {
case 'regional_guidance':
- return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
+ return !isSD3 && !isCogView4 && !isFluxKontext;
case 'control_layer':
- return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
+ return !isSD3 && !isCogView4 && !isFluxKontext;
case 'inpaint_mask':
- return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
+ return !isFluxKontext;
case 'raster_layer':
- return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
+ return !isFluxKontext;
default:
assert>(false);
}
- }, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, isGemini2_5]);
+ }, [entityType, isSD3, isCogView4, isFluxKontext]);
return isEntityTypeEnabled;
};
diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts
index f57c7038e14..b700392c05f 100644
--- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts
@@ -1,5 +1,4 @@
import { getArbitraryBaseColor } from '@invoke-ai/ui-library';
-import { $authToken } from 'app/store/nanostores/authToken';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import { TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern';
@@ -95,7 +94,7 @@ export class CanvasBackgroundModule extends CanvasModuleBase {
this.konva.patternRect.fillPatternImage(this.checkboardPattern);
this.render();
};
- this.checkboardPattern.src = $authToken.get() ? 'use-credentials' : 'anonymous';
+ this.checkboardPattern.src = 'anonymous';
this.checkboardPattern.src = this.config.CHECKERBOARD_PATTERN_DATAURL;
this.render();
};
diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts
index f2d7140d68a..8bb8ec25319 100644
--- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts
@@ -1,4 +1,3 @@
-import { $authToken } from 'app/store/nanostores/authToken';
import { rgbColorToString } from 'common/util/colorCodeTransformers';
import { SyncableMap } from 'common/util/SyncableMap/SyncableMap';
import { throttle } from 'es-toolkit/compat';
@@ -38,7 +37,7 @@ function setFillPatternImage(shape: Konva.Shape, ...args: Parameters {
shape.fillPatternImage(imageElement);
};
- imageElement.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
+ imageElement.crossOrigin = 'anonymous';
imageElement.src = getPatternSVG(...args);
return imageElement;
}
diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts
index c684739ede9..0aab41ee7e3 100644
--- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts
@@ -126,7 +126,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
return;
}
- const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url, true));
+ const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url));
if (imageElementResult.isErr()) {
// Image loading failed (e.g. the URL to the "physical" image is invalid)
this.onFailedToLoadImage(
@@ -152,7 +152,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
this.konva.placeholder.text.text(t('common.loadingImage', 'Loading Image'));
}
- const imageElementResult = await withResultAsync(() => loadImage(dataURL, false));
+ const imageElementResult = await withResultAsync(() => loadImage(dataURL));
if (imageElementResult.isErr()) {
// Image loading failed (e.g. the URL to the "physical" image is invalid)
this.onFailedToLoadImage(
diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts
index 088e7f265a4..ecf9a5d1c7c 100644
--- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts
@@ -13,8 +13,6 @@ import { selectBboxOverlay } from 'features/controlLayers/store/canvasSettingsSl
import { selectModel } from 'features/controlLayers/store/paramsSlice';
import { selectBbox } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, Tool } from 'features/controlLayers/store/types';
-import { API_BASE_MODELS } from 'features/modelManagerV2/models';
-import type { ModelIdentifierField } from 'features/nodes/types/common';
import Konva from 'konva';
import { atom } from 'nanostores';
import type { Logger } from 'roarr';
@@ -238,22 +236,16 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
this.syncOverlay();
- const model = this.manager.stateApi.runSelector(selectModel);
-
this.konva.transformer.setAttrs({
listening: tool === 'bbox',
- enabledAnchors: this.getEnabledAnchors(tool, model),
+ enabledAnchors: this.getEnabledAnchors(tool),
});
};
- getEnabledAnchors = (tool: Tool, model?: ModelIdentifierField | null): string[] => {
+ getEnabledAnchors = (tool: Tool): string[] => {
if (tool !== 'bbox') {
return NO_ANCHORS;
}
- if (model?.base && API_BASE_MODELS.includes(model.base)) {
- // The bbox is not resizable in these modes
- return NO_ANCHORS;
- }
return ALL_ANCHORS;
};
diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/util.ts b/invokeai/frontend/web/src/features/controlLayers/konva/util.ts
index 8e34f2169c5..6189b3eef78 100644
--- a/invokeai/frontend/web/src/features/controlLayers/konva/util.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/konva/util.ts
@@ -1,5 +1,4 @@
import type { Selector, Store } from '@reduxjs/toolkit';
-import { $authToken, $crossOrigin } from 'app/store/nanostores/authToken';
import { roundDownToMultiple, roundUpToMultiple } from 'common/util/roundDownToMultiple';
import { clamp } from 'es-toolkit/compat';
import type {
@@ -364,7 +363,7 @@ export const dataURLToImageData = (dataURL: string, width: number, height: numbe
reject(e);
};
- image.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
+ image.crossOrigin = 'anonymous';
image.src = dataURL;
});
};
@@ -478,23 +477,14 @@ export function getImageDataTransparency(imageData: ImageData): Transparency {
/**
* Loads an image from a URL and returns a promise that resolves with the loaded image element.
* @param src The image source URL
- * @param fetchUrlFirst Whether to fetch the image's URL first, assuming the provided `src` will redirect to a different URL. This addresses an issue where CORS headers are dropped during a redirect.
* @returns A promise that resolves with the loaded image element
*/
-export async function loadImage(src: string, fetchUrlFirst?: boolean): Promise {
- const authToken = $authToken.get();
- let url = src;
- if (authToken && fetchUrlFirst) {
- const response = await fetch(`${src}?url_only=true`, { credentials: 'include' });
- const data = await response.json();
- url = data.url;
- }
-
+export function loadImage(url: string): Promise {
return new Promise((resolve, reject) => {
const imageElement = new Image();
imageElement.onload = () => resolve(imageElement);
imageElement.onerror = (error) => reject(error);
- imageElement.crossOrigin = $crossOrigin.get();
+ imageElement.crossOrigin = 'anonymous';
imageElement.src = url;
});
}
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts
index b8664aeb5ed..f7eef4a6454 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts
@@ -35,7 +35,6 @@ import {
getScaledBoundingBoxDimensions,
} from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import { simplifyFlatNumbersArray } from 'features/controlLayers/util/simplify';
-import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import type { IRect } from 'konva/lib/types';
@@ -73,17 +72,9 @@ import type {
} from './types';
import {
ASPECT_RATIO_MAP,
- CHATGPT_ASPECT_RATIOS,
DEFAULT_ASPECT_RATIO_CONFIG,
- FLUX_KONTEXT_ASPECT_RATIOS,
- GEMINI_2_5_ASPECT_RATIOS,
getEntityIdentifier,
getInitialCanvasState,
- IMAGEN_ASPECT_RATIOS,
- isChatGPT4oAspectRatioID,
- isFluxKontextAspectRatioID,
- isGemini2_5AspectRatioID,
- isImagenAspectRatioID,
isRegionalGuidanceFLUXReduxConfig,
isRegionalGuidanceIPAdapterConfig,
zCanvasState,
@@ -1227,33 +1218,6 @@ const slice = createSlice({
state.bbox.aspectRatio.id = id;
if (id === 'Free') {
state.bbox.aspectRatio.isLocked = false;
- } else if (
- (state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'imagen4') &&
- isImagenAspectRatioID(id)
- ) {
- const { width, height } = IMAGEN_ASPECT_RATIOS[id];
- state.bbox.rect.width = width;
- state.bbox.rect.height = height;
- state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
- state.bbox.aspectRatio.isLocked = true;
- } else if (state.bbox.modelBase === 'chatgpt-4o' && isChatGPT4oAspectRatioID(id)) {
- const { width, height } = CHATGPT_ASPECT_RATIOS[id];
- state.bbox.rect.width = width;
- state.bbox.rect.height = height;
- state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
- state.bbox.aspectRatio.isLocked = true;
- } else if (state.bbox.modelBase === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) {
- const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id];
- state.bbox.rect.width = width;
- state.bbox.rect.height = height;
- state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
- state.bbox.aspectRatio.isLocked = true;
- } else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
- const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id];
- state.bbox.rect.width = width;
- state.bbox.rect.height = height;
- state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
- state.bbox.aspectRatio.isLocked = true;
} else {
state.bbox.aspectRatio.isLocked = true;
state.bbox.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio;
@@ -1700,14 +1664,6 @@ const slice = createSlice({
const base = model?.base;
if (isMainModelBase(base) && state.bbox.modelBase !== base) {
state.bbox.modelBase = base;
- if (API_BASE_MODELS.includes(base)) {
- state.bbox.aspectRatio.isLocked = true;
- state.bbox.aspectRatio.value = 1;
- state.bbox.aspectRatio.id = '1:1';
- state.bbox.rect.width = 1024;
- state.bbox.rect.height = 1024;
- }
-
syncScaledSize(state);
}
});
@@ -1832,10 +1788,6 @@ export const {
} = slice.actions;
const syncScaledSize = (state: CanvasState) => {
- if (API_BASE_MODELS.includes(state.bbox.modelBase)) {
- // Imagen3 has fixed sizes. Scaled bbox is not supported.
- return;
- }
if (state.bbox.scaleMethod === 'auto') {
// Sync both aspect ratio and size
const { width, height } = state.bbox.rect;
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/filters.ts b/invokeai/frontend/web/src/features/controlLayers/store/filters.ts
index 676a353f00f..9373031e11f 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/filters.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/filters.ts
@@ -166,7 +166,7 @@ const _zFilterConfig = z.discriminatedUnion('type', [
]);
export type FilterConfig = z.infer;
-export const zFilterType = z.enum([
+const zFilterType = z.enum([
'adjust_image',
'canny_edge_detection',
'color_map',
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts
index dfde382fcab..42d6986d5c5 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts
@@ -1,7 +1,6 @@
import { createSelector, createSlice, type PayloadAction } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
import type { SliceConfig } from 'app/store/types';
-import type { NumericalParameterConfig } from 'app/types/invokeai';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import { type LoRA, zLoRA } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
@@ -9,7 +8,7 @@ import type { LoRAModelConfig } from 'services/api/types';
import { v4 as uuidv4 } from 'uuid';
import z from 'zod';
-export const DEFAULT_LORA_WEIGHT_CONFIG: NumericalParameterConfig = {
+export const DEFAULT_LORA_WEIGHT_CONFIG = {
initial: 0.75,
sliderMin: -1,
sliderMax: 2,
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
index 609478b4c0c..9dd85b1bc20 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
@@ -6,31 +6,19 @@ import { deepClone } from 'common/util/deepClone';
import { roundDownToMultiple, roundToMultiple } from 'common/util/roundDownToMultiple';
import { isPlainObject } from 'es-toolkit';
import { clamp } from 'es-toolkit/compat';
-import type { AspectRatioID, ParamsState, RgbaColor } from 'features/controlLayers/store/types';
+import type { AspectRatioID, InfillMethod, ParamsState, RgbaColor } from 'features/controlLayers/store/types';
import {
ASPECT_RATIO_MAP,
- CHATGPT_ASPECT_RATIOS,
DEFAULT_ASPECT_RATIO_CONFIG,
- FLUX_KONTEXT_ASPECT_RATIOS,
- GEMINI_2_5_ASPECT_RATIOS,
getInitialParamsState,
- IMAGEN_ASPECT_RATIOS,
- isChatGPT4oAspectRatioID,
- isFluxKontextAspectRatioID,
- isGemini2_5AspectRatioID,
- isImagenAspectRatioID,
MAX_POSITIVE_PROMPT_HISTORY,
zParamsState,
} from 'features/controlLayers/store/types';
import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import {
- API_BASE_MODELS,
- SUPPORTS_ASPECT_RATIO_BASE_MODELS,
SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS,
SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS,
- SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS,
SUPPORTS_REF_IMAGES_BASE_MODELS,
- SUPPORTS_SEED_BASE_MODELS,
} from 'features/modelManagerV2/models';
import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
import type {
@@ -121,14 +109,6 @@ const slice = createSlice({
return;
}
- if (API_BASE_MODELS.includes(model.base)) {
- state.dimensions.aspectRatio.isLocked = true;
- state.dimensions.aspectRatio.value = 1;
- state.dimensions.aspectRatio.id = '1:1';
- state.dimensions.width = 1024;
- state.dimensions.height = 1024;
- }
-
applyClipSkip(state, model, state.clipSkip);
},
vaeSelected: (state, action: PayloadAction) => {
@@ -239,7 +219,7 @@ const slice = createSlice({
setRefinerStart: (state, action: PayloadAction) => {
state.refinerStart = action.payload;
},
- setInfillMethod: (state, action: PayloadAction) => {
+ setInfillMethod: (state, action: PayloadAction) => {
state.infillMethod = action.payload;
},
setInfillTileSize: (state, action: PayloadAction) => {
@@ -318,30 +298,6 @@ const slice = createSlice({
state.dimensions.aspectRatio.id = id;
if (id === 'Free') {
state.dimensions.aspectRatio.isLocked = false;
- } else if ((state.model?.base === 'imagen3' || state.model?.base === 'imagen4') && isImagenAspectRatioID(id)) {
- const { width, height } = IMAGEN_ASPECT_RATIOS[id];
- state.dimensions.width = width;
- state.dimensions.height = height;
- state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height;
- state.dimensions.aspectRatio.isLocked = true;
- } else if (state.model?.base === 'chatgpt-4o' && isChatGPT4oAspectRatioID(id)) {
- const { width, height } = CHATGPT_ASPECT_RATIOS[id];
- state.dimensions.width = width;
- state.dimensions.height = height;
- state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height;
- state.dimensions.aspectRatio.isLocked = true;
- } else if (state.model?.base === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) {
- const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id];
- state.dimensions.width = width;
- state.dimensions.height = height;
- state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height;
- state.dimensions.aspectRatio.isLocked = true;
- } else if (state.model?.base === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
- const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id];
- state.dimensions.width = width;
- state.dimensions.height = height;
- state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height;
- state.dimensions.aspectRatio.isLocked = true;
} else {
state.dimensions.aspectRatio.isLocked = true;
state.dimensions.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio;
@@ -541,19 +497,12 @@ export const selectIsSDXL = createParamsSelector((params) => params.model?.base
export const selectIsFLUX = createParamsSelector((params) => params.model?.base === 'flux');
export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3');
export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
-export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
-export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
export const selectIsFluxKontext = createParamsSelector((params) => {
- if (params.model?.base === 'flux-kontext') {
- return true;
- }
if (params.model?.base === 'flux' && params.model?.name.toLowerCase().includes('kontext')) {
return true;
}
return false;
});
-export const selectIsChatGPT4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
-export const selectIsGemini2_5 = createParamsSelector((params) => params.model?.base === 'gemini-2.5');
export const selectModel = createParamsSelector((params) => params.model);
export const selectModelKey = createParamsSelector((params) => params.model?.key);
@@ -592,26 +541,10 @@ export const selectModelSupportsNegativePrompt = createSelector(
selectModel,
(model) => !!model && SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS.includes(model.base)
);
-export const selectModelSupportsSeed = createSelector(
- selectModel,
- (model) => !!model && SUPPORTS_SEED_BASE_MODELS.includes(model.base)
-);
export const selectModelSupportsRefImages = createSelector(
selectModel,
(model) => !!model && SUPPORTS_REF_IMAGES_BASE_MODELS.includes(model.base)
);
-export const selectModelSupportsAspectRatio = createSelector(
- selectModel,
- (model) => !!model && SUPPORTS_ASPECT_RATIO_BASE_MODELS.includes(model.base)
-);
-export const selectModelSupportsPixelDimensions = createSelector(
- selectModel,
- (model) => !!model && SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS.includes(model.base)
-);
-export const selectIsApiBaseModel = createSelector(
- selectModel,
- (model) => !!model && API_BASE_MODELS.includes(model.base)
-);
export const selectModelSupportsOptimizedDenoising = createSelector(
selectModel,
(model) => !!model && SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS.includes(model.base)
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts
index e787d08fca0..e0896c0fb95 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts
@@ -12,25 +12,13 @@ import type {
RefImagesState,
} from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
-import type {
- ChatGPT4oModelConfig,
- FLUXKontextModelConfig,
- FLUXReduxModelConfig,
- IPAdapterModelConfig,
-} from 'services/api/types';
+import type { FLUXKontextModelConfig, FLUXReduxModelConfig, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import type { PartialDeep } from 'type-fest';
import type { CLIPVisionModelV2, IPMethodV2, RefImageState } from './types';
import { getInitialRefImagesState, isFLUXReduxConfig, isIPAdapterConfig, zRefImagesState } from './types';
-import {
- getReferenceImageState,
- initialChatGPT4oReferenceImage,
- initialFluxKontextReferenceImage,
- initialFLUXRedux,
- initialGemini2_5ReferenceImage,
- initialIPAdapter,
-} from './util';
+import { getReferenceImageState, initialFluxKontextReferenceImage, initialFLUXRedux, initialIPAdapter } from './util';
type PayloadActionWithId = T extends void
? PayloadAction<{ id: string }>
@@ -103,7 +91,7 @@ const slice = createSlice({
refImageModelChanged: (
state,
action: PayloadActionWithId<{
- modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig | null;
+ modelConfig: IPAdapterModelConfig | FLUXKontextModelConfig | FLUXReduxModelConfig | null;
}>
) => {
const { id, modelConfig } = action.payload;
@@ -129,31 +117,8 @@ const slice = createSlice({
// The type of ref image depends on the model. When the user switches the model, we rebuild the ref image.
// When we switch the model, we keep the image the same, but change the other parameters.
- if (entity.config.model.base === 'chatgpt-4o') {
- // Switching to chatgpt-4o ref image
- entity.config = {
- ...initialChatGPT4oReferenceImage,
- image: entity.config.image,
- model: entity.config.model,
- };
- return;
- }
-
- if (entity.config.model.base === 'gemini-2.5') {
- // Switching to Gemini 2.5 Flash Preview (nano banana) ref image
- entity.config = {
- ...initialGemini2_5ReferenceImage,
- image: entity.config.image,
- model: entity.config.model,
- };
- return;
- }
-
- if (
- entity.config.model.base === 'flux-kontext' ||
- (entity.config.model.base === 'flux' && entity.config.model.name?.toLowerCase().includes('kontext'))
- ) {
- // Switching to flux-kontext ref image
+ if (entity.config.model.base === 'flux' && entity.config.model.name?.toLowerCase().includes('kontext')) {
+ // Switching to flux kontext ref image
entity.config = {
...initialFluxKontextReferenceImage,
image: entity.config.image,
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
index 3163bd85b2a..87c173d7cca 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
@@ -53,7 +53,7 @@ const zCropBox = z.object({
// This parsing happens currently in two places:
// - Recalling metadata.
// - Loading/rehydrating persisted client state from storage.
-export const zCroppableImageWithDims = z.preprocess(
+const zCroppableImageWithDims = z.preprocess(
(val) => {
try {
const imageWithDims = zImageWithDims.parse(val);
@@ -313,25 +313,6 @@ const zRegionalGuidanceFLUXReduxConfig = z.object({
});
type RegionalGuidanceFLUXReduxConfig = z.infer;
-const zChatGPT4oReferenceImageConfig = z.object({
- type: z.literal('chatgpt_4o_reference_image'),
- image: zCroppableImageWithDims.nullable(),
- /**
- * TODO(psyche): Technically there is no model for ChatGPT 4o reference images - it's just a field in the API call.
- * But we use a model drop down to switch between different ref image types, so there needs to be a model here else
- * there will be no way to switch between ref image types.
- */
- model: zModelIdentifierField.nullable(),
-});
-export type ChatGPT4oReferenceImageConfig = z.infer;
-
-const zGemini2_5ReferenceImageConfig = z.object({
- type: z.literal('gemini_2_5_reference_image'),
- image: zCroppableImageWithDims.nullable(),
- model: zModelIdentifierField.nullable(),
-});
-export type Gemini2_5ReferenceImageConfig = z.infer;
-
const zFluxKontextReferenceImageConfig = z.object({
type: z.literal('flux_kontext_reference_image'),
image: zCroppableImageWithDims.nullable(),
@@ -349,13 +330,7 @@ const zCanvasEntityBase = z.object({
export const zRefImageState = z.object({
id: zId,
isEnabled: z.boolean().default(true),
- config: z.discriminatedUnion('type', [
- zIPAdapterConfig,
- zFLUXReduxConfig,
- zChatGPT4oReferenceImageConfig,
- zFluxKontextReferenceImageConfig,
- zGemini2_5ReferenceImageConfig,
- ]),
+ config: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zFluxKontextReferenceImageConfig]),
});
export type RefImageState = z.infer;
@@ -365,18 +340,10 @@ export const isIPAdapterConfig = (config: RefImageState['config']): config is IP
export const isFLUXReduxConfig = (config: RefImageState['config']): config is FLUXReduxConfig =>
config.type === 'flux_redux';
-export const isChatGPT4oReferenceImageConfig = (
- config: RefImageState['config']
-): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
-
export const isFluxKontextReferenceImageConfig = (
config: RefImageState['config']
): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image';
-export const isGemini2_5ReferenceImageConfig = (
- config: RefImageState['config']
-): config is Gemini2_5ReferenceImageConfig => config.type === 'gemini_2_5_reference_image';
-
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
export type FillStyle = z.infer;
export const isFillStyle = (v: unknown): v is FillStyle => zFillStyle.safeParse(v).success;
@@ -555,76 +522,6 @@ export const ASPECT_RATIO_MAP: Record, { ratio: n
'9:21': { ratio: 9 / 21, inverseID: '21:9' },
};
-export const zImagen3AspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']);
-type ImagenAspectRatio = z.infer;
-export const isImagenAspectRatioID = (v: unknown): v is ImagenAspectRatio => zImagen3AspectRatioID.safeParse(v).success;
-export const IMAGEN_ASPECT_RATIOS: Record = {
- '16:9': { width: 1408, height: 768 },
- '4:3': { width: 1280, height: 896 },
- '1:1': { width: 1024, height: 1024 },
- '3:4': { width: 896, height: 1280 },
- '9:16': { width: 768, height: 1408 },
-};
-
-export const zChatGPT4oAspectRatioID = z.enum(['3:2', '1:1', '2:3']);
-type ChatGPT4oAspectRatio = z.infer;
-export const isChatGPT4oAspectRatioID = (v: unknown): v is ChatGPT4oAspectRatio =>
- zChatGPT4oAspectRatioID.safeParse(v).success;
-export const CHATGPT_ASPECT_RATIOS: Record = {
- '3:2': { width: 1536, height: 1024 },
- '1:1': { width: 1024, height: 1024 },
- '2:3': { width: 1024, height: 1536 },
-} as const;
-
-export const zGemini2_5AspectRatioID = z.enum(['1:1']);
-type Gemini2_5AspectRatio = z.infer;
-export const isGemini2_5AspectRatioID = (v: unknown): v is Gemini2_5AspectRatio =>
- zGemini2_5AspectRatioID.safeParse(v).success;
-export const GEMINI_2_5_ASPECT_RATIOS: Record = {
- '1:1': { width: 1024, height: 1024 },
-} as const;
-
-export const zFluxKontextAspectRatioID = z.enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', '9:21']);
-type FluxKontextAspectRatio = z.infer;
-export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer =>
- zFluxKontextAspectRatioID.safeParse(v).success;
-export const FLUX_KONTEXT_ASPECT_RATIOS: Record = {
- '3:4': { width: 880, height: 1184 },
- '4:3': { width: 1184, height: 880 },
- '9:16': { width: 752, height: 1392 },
- '16:9': { width: 1392, height: 752 },
- '21:9': { width: 1568, height: 672 },
- '9:21': { width: 672, height: 1568 },
- '1:1': { width: 1024, height: 1024 },
-};
-
-export const zVeo3AspectRatioID = z.enum(['16:9']);
-type Veo3AspectRatio = z.infer;
-export const isVeo3AspectRatioID = (v: unknown): v is Veo3AspectRatio => zVeo3AspectRatioID.safeParse(v).success;
-
-export const zRunwayAspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16', '21:9']);
-type RunwayAspectRatio = z.infer;
-export const isRunwayAspectRatioID = (v: unknown): v is RunwayAspectRatio => zRunwayAspectRatioID.safeParse(v).success;
-
-export const zVideoAspectRatio = z.union([zVeo3AspectRatioID, zRunwayAspectRatioID]);
-export type VideoAspectRatio = z.infer;
-export const isVideoAspectRatio = (v: unknown): v is VideoAspectRatio => zVideoAspectRatio.safeParse(v).success;
-
-export const zVeo3Resolution = z.enum(['720p', '1080p']);
-type Veo3Resolution = z.infer;
-export const isVeo3Resolution = (v: unknown): v is Veo3Resolution => zVeo3Resolution.safeParse(v).success;
-export const RESOLUTION_MAP: Record = {
- '720p': { width: 1280, height: 720 },
- '1080p': { width: 1920, height: 1080 },
-};
-
-export const zRunwayResolution = z.enum(['720p']);
-type RunwayResolution = z.infer;
-export const isRunwayResolution = (v: unknown): v is RunwayResolution => zRunwayResolution.safeParse(v).success;
-
-export const zVideoResolution = z.union([zVeo3Resolution, zRunwayResolution]);
-export type VideoResolution = z.infer;
-
const zAspectRatioConfig = z.object({
id: zAspectRatioID,
value: z.number().gt(0),
@@ -638,24 +535,6 @@ export const DEFAULT_ASPECT_RATIO_CONFIG: AspectRatioConfig = {
isLocked: false,
};
-const zVeo3DurationID = z.enum(['8']);
-type Veo3Duration = z.infer;
-export const isVeo3DurationID = (v: unknown): v is Veo3Duration => zVeo3DurationID.safeParse(v).success;
-export const VEO3_DURATIONS: Record = {
- '8': '8 seconds',
-};
-
-const zRunwayDurationID = z.enum(['5', '10']);
-type RunwayDuration = z.infer;
-export const isRunwayDurationID = (v: unknown): v is RunwayDuration => zRunwayDurationID.safeParse(v).success;
-export const RUNWAY_DURATIONS: Record = {
- '5': '5 seconds',
- '10': '10 seconds',
-};
-
-export const zVideoDuration = z.union([zVeo3DurationID, zRunwayDurationID]);
-export type VideoDuration = z.infer;
-
const zBboxState = z.object({
rect: z.object({
x: z.number().int(),
@@ -683,6 +562,9 @@ const zPositivePromptHistory = z
.array(zParameterPositivePrompt)
.transform((arr) => arr.slice(0, MAX_POSITIVE_PROMPT_HISTORY));
+export const zInfillMethod = z.enum(['patchmatch', 'lama', 'cv2', 'color', 'tile']);
+export type InfillMethod = z.infer;
+
export const zParamsState = z.object({
_version: z.literal(2),
maskBlur: z.number(),
@@ -690,7 +572,7 @@ export const zParamsState = z.object({
canvasCoherenceMode: zParameterCanvasCoherenceMode,
canvasCoherenceMinDenoise: zParameterStrength,
canvasCoherenceEdgeSize: z.number(),
- infillMethod: z.string(),
+ infillMethod: zInfillMethod,
infillTileSize: z.number(),
infillPatchmatchDownscaleSize: z.number(),
infillColorValue: zRgbaColor,
@@ -843,12 +725,7 @@ export const getInitialRefImagesState = (): RefImagesState => ({
export const zCanvasReferenceImageState_OLD = zCanvasEntityBase.extend({
type: z.literal('reference_image'),
- ipAdapter: z.discriminatedUnion('type', [
- zIPAdapterConfig,
- zFLUXReduxConfig,
- zChatGPT4oReferenceImageConfig,
- zGemini2_5ReferenceImageConfig,
- ]),
+ ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig]),
});
export const zCanvasMetadata = z.object({
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/util.ts b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
index 54b484e78ae..737144a63df 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/util.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
@@ -7,13 +7,11 @@ import type {
CanvasInpaintMaskState,
CanvasRasterLayerState,
CanvasRegionalGuidanceState,
- ChatGPT4oReferenceImageConfig,
ControlLoRAConfig,
ControlNetConfig,
CroppableImageWithDims,
FluxKontextReferenceImageConfig,
FLUXReduxConfig,
- Gemini2_5ReferenceImageConfig,
ImageWithDims,
IPAdapterConfig,
RasterLayerAdjustments,
@@ -22,7 +20,6 @@ import type {
RgbColor,
T2IAdapterConfig,
} from 'features/controlLayers/store/types';
-import type { ImageField } from 'features/nodes/types/common';
import type { ImageDTO } from 'services/api/types';
import { assert } from 'tsafe';
import type { PartialDeep } from 'type-fest';
@@ -62,8 +59,6 @@ export const imageDTOToCroppableImage = (
return val;
};
-export const imageDTOToImageField = ({ image_name }: ImageDTO): ImageField => ({ image_name });
-
const DEFAULT_RG_MASK_FILL_COLORS: RgbColor[] = [
{ r: 121, g: 157, b: 219 }, // rgb(121, 157, 219)
{ r: 131, g: 214, b: 131 }, // rgb(131, 214, 131)
@@ -111,16 +106,6 @@ export const initialFLUXRedux: FLUXReduxConfig = {
model: null,
imageInfluence: 'highest',
};
-export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = {
- type: 'chatgpt_4o_reference_image',
- image: null,
- model: null,
-};
-export const initialGemini2_5ReferenceImage: Gemini2_5ReferenceImageConfig = {
- type: 'gemini_2_5_reference_image',
- image: null,
- model: null,
-};
export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = {
type: 'flux_kontext_reference_image',
image: null,
diff --git a/invokeai/frontend/web/src/features/cropper/README.md b/invokeai/frontend/web/src/features/cropper/README.md
new file mode 100644
index 00000000000..0903dab7cfc
--- /dev/null
+++ b/invokeai/frontend/web/src/features/cropper/README.md
@@ -0,0 +1,11 @@
+# Image cropper
+
+This is a simple image cropping canvas app built with KonvaJS ("native" Konva, _not_ the react bindings).
+
+The editor implementation is here: invokeai/frontend/web/src/features/cropper/lib/editor.ts
+
+It is rendered in a modal.
+
+Currently, the crop functionality is only exposed for reference images. These are the kind of images that most often need cropping (i.e. for FLUX Kontext, which is sensitive to the size/aspect ratio of its ref images). All ref image state is enriched to include a ref to the original image, the cropped image, and the crop attributes.
+
+The functionality could be extended to all images in the future, but there are some questions around whether we consider gallery images immutable. If so, we can't crop them in place. Do we instead add a new cropped image to the gallery? Or do we add a field to the image metadata that points to a cropped version of the image?
diff --git a/invokeai/frontend/web/src/features/cropper/lib/editor.ts b/invokeai/frontend/web/src/features/cropper/lib/editor.ts
index 6249e3bb255..62ce5ca9df9 100644
--- a/invokeai/frontend/web/src/features/cropper/lib/editor.ts
+++ b/invokeai/frontend/web/src/features/cropper/lib/editor.ts
@@ -1,4 +1,3 @@
-import { $crossOrigin } from 'app/store/nanostores/authToken';
import { TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern';
import Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
@@ -1098,7 +1097,7 @@ export class Editor {
return new Promise((resolve, reject) => {
const img = new Image();
- img.crossOrigin = $crossOrigin.get();
+ img.crossOrigin = 'anonymous';
img.onload = () => {
this.originalImage = img;
diff --git a/invokeai/frontend/web/src/features/deleteImageModal/README.md b/invokeai/frontend/web/src/features/deleteImageModal/README.md
new file mode 100644
index 00000000000..0591676530e
--- /dev/null
+++ b/invokeai/frontend/web/src/features/deleteImageModal/README.md
@@ -0,0 +1,7 @@
+# Delete image modal
+
+When users delete images, we show a confirmation dialog to prevent accidental deletions. Users can opt out of this, but we still check if deleting an image would screw up their workspace and prompt if so.
+
+For example, if an image is currently set as a field in the workflow editor, we warn the user that deleting it will remove it from the node. We warn them even if they have opted out of the confirmation dialog.
+
+These "image usage" checks are done using redux selectors/util functions. See invokeai/frontend/web/src/features/deleteImageModal/store/state.ts
diff --git a/invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts b/invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts
deleted file mode 100644
index b14cd70ebe8..00000000000
--- a/invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state';
-import { useCallback, useMemo } from 'react';
-import type { VideoDTO } from 'services/api/types';
-
-export const useDeleteVideo = (videoDTO?: VideoDTO | null) => {
- const deleteImageModal = useDeleteVideoModalApi();
-
- const isEnabled = useMemo(() => {
- if (!videoDTO) {
- return;
- }
- return true;
- }, [videoDTO]);
- const _delete = useCallback(() => {
- if (!videoDTO) {
- return;
- }
- if (!isEnabled) {
- return;
- }
- deleteImageModal.delete([videoDTO.video_id]);
- }, [deleteImageModal, videoDTO, isEnabled]);
-
- return {
- delete: _delete,
- isEnabled,
- };
-};
diff --git a/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts b/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts
index 38aa8b039f3..c50aa9465f5 100644
--- a/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts
+++ b/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts
@@ -12,7 +12,7 @@ import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { CanvasState, RefImagesState } from 'features/controlLayers/store/types';
import type { ImageUsage } from 'features/deleteImageModal/store/types';
import { selectGetImageNamesQueryArgs } from 'features/gallery/store/gallerySelectors';
-import { itemSelected } from 'features/gallery/store/gallerySlice';
+import { imageSelected } from 'features/gallery/store/gallerySlice';
import { fieldImageCollectionValueChanged, fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import type { NodesState } from 'features/nodes/store/types';
@@ -89,14 +89,12 @@ const handleDeletions = async (image_names: string[], store: AppStore) => {
const newImageNames = data?.image_names.filter((name) => !deleted_images.includes(name)) || [];
const newSelectedImage = newImageNames[index ?? 0] || null;
- const galleryImageNames = state.gallery.selection.map((s) => s.id);
-
- if (intersection(galleryImageNames, image_names).length > 0) {
+ if (intersection(state.gallery.selection, image_names).length > 0) {
if (newSelectedImage) {
// Some selected images were deleted, clear selection
- dispatch(itemSelected({ type: 'image', id: newSelectedImage }));
+ dispatch(imageSelected(newSelectedImage));
} else {
- dispatch(itemSelected(null));
+ dispatch(imageSelected(null));
}
}
diff --git a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx b/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx
deleted file mode 100644
index 9e56bfba7dc..00000000000
--- a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx
+++ /dev/null
@@ -1,36 +0,0 @@
-import type { IconButtonProps } from '@invoke-ai/ui-library';
-import { IconButton } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectSelectionCount } from 'features/gallery/store/gallerySelectors';
-import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiTrashSimpleBold } from 'react-icons/pi';
-import { $isConnected } from 'services/events/stores';
-
-type Props = Omit & {
- onClick: () => void;
-};
-
-export const DeleteVideoButton = memo((props: Props) => {
- const { onClick, isDisabled } = props;
- const { t } = useTranslation();
- const isConnected = useStore($isConnected);
- const count = useAppSelector(selectSelectionCount);
- const labelMessage: string = `${t('gallery.deleteVideo', { count })} (Del)`;
-
- return (
- }
- tooltip={labelMessage}
- aria-label={labelMessage}
- isDisabled={isDisabled || !isConnected}
- colorScheme="error"
- variant="link"
- alignSelf="stretch"
- />
- );
-});
-
-DeleteVideoButton.displayName = 'DeleteVideoButton';
diff --git a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx b/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx
deleted file mode 100644
index 662e7e6f773..00000000000
--- a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx
+++ /dev/null
@@ -1,43 +0,0 @@
-import { ConfirmationAlertDialog, Flex, FormControl, FormLabel, Switch, Text } from '@invoke-ai/ui-library';
-import { useAppSelector, useAppStore } from 'app/store/storeHooks';
-import { useDeleteVideoModalApi, useDeleteVideoModalState } from 'features/deleteVideoModal/store/state';
-import { selectSystemShouldConfirmOnDelete, setShouldConfirmOnDelete } from 'features/system/store/systemSlice';
-import type { ChangeEvent } from 'react';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-
-export const DeleteVideoModal = memo(() => {
- const state = useDeleteVideoModalState();
- const api = useDeleteVideoModalApi();
- const { dispatch } = useAppStore();
- const { t } = useTranslation();
- const shouldConfirmOnDelete = useAppSelector(selectSystemShouldConfirmOnDelete);
-
- const handleChangeShouldConfirmOnDelete = useCallback(
- (e: ChangeEvent) => dispatch(setShouldConfirmOnDelete(!e.target.checked)),
- [dispatch]
- );
-
- return (
-
-
- {t('gallery.deleteVideoPermanent')}
- {t('common.areYouSure')}
-
- {t('common.dontAskMeAgain')}
-
-
-
-
- );
-});
-DeleteVideoModal.displayName = 'DeleteVideoModal';
diff --git a/invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts b/invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts
deleted file mode 100644
index 4e7580ce301..00000000000
--- a/invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts
+++ /dev/null
@@ -1,111 +0,0 @@
-import { useStore } from '@nanostores/react';
-import type { AppStore } from 'app/store/store';
-import { useAppStore } from 'app/store/storeHooks';
-import { intersection } from 'es-toolkit/compat';
-import { selectGetVideoIdsQueryArgs } from 'features/gallery/store/gallerySelectors';
-import { itemSelected } from 'features/gallery/store/gallerySlice';
-import { selectSystemShouldConfirmOnDelete } from 'features/system/store/systemSlice';
-import { atom } from 'nanostores';
-import { useMemo } from 'react';
-import { videosApi } from 'services/api/endpoints/videos';
-
-// Implements an awaitable modal dialog for deleting images
-
-type DeleteVideosModalState = {
- video_ids: string[];
- isOpen: boolean;
- resolve?: () => void;
- reject?: (reason?: string) => void;
-};
-
-const getInitialState = (): DeleteVideosModalState => ({
- video_ids: [],
- isOpen: false,
-});
-
-const $deleteVideosModalState = atom(getInitialState());
-
-const deleteVideosWithDialog = async (video_ids: string[], store: AppStore): Promise => {
- const { getState } = store;
- const shouldConfirmOnDelete = selectSystemShouldConfirmOnDelete(getState());
-
- if (!shouldConfirmOnDelete) {
- // If we don't need to confirm and the resources are not in use, delete them directly
- await handleDeletions(video_ids, store);
- return;
- }
-
- return new Promise((resolve, reject) => {
- $deleteVideosModalState.set({
- video_ids,
- isOpen: true,
- resolve,
- reject,
- });
- });
-};
-
-const handleDeletions = async (video_ids: string[], store: AppStore) => {
- try {
- const { dispatch, getState } = store;
- const state = getState();
- const { data } = videosApi.endpoints.getVideoIds.select(selectGetVideoIdsQueryArgs(state))(state);
- const index = data?.video_ids.findIndex((id) => id === video_ids[0]);
- const { deleted_videos } = await dispatch(
- videosApi.endpoints.deleteVideos.initiate({ video_ids }, { track: false })
- ).unwrap();
-
- const newVideoIds = data?.video_ids.filter((id) => !deleted_videos.includes(id)) || [];
- const newSelectedVideoId = newVideoIds[index ?? 0] || null;
-
- if (
- intersection(
- state.gallery.selection.map((s) => s.id),
- video_ids
- ).length > 0 &&
- newSelectedVideoId
- ) {
- // Some selected images were deleted, clear selection
- dispatch(itemSelected({ type: 'video', id: newSelectedVideoId }));
- }
- } catch {
- // no-op
- }
-};
-
-const confirmDeletion = async (store: AppStore) => {
- const state = $deleteVideosModalState.get();
- await handleDeletions(state.video_ids, store);
- state.resolve?.();
- closeSilently();
-};
-
-const cancelDeletion = () => {
- const state = $deleteVideosModalState.get();
- state.reject?.('User canceled');
- closeSilently();
-};
-
-const closeSilently = () => {
- $deleteVideosModalState.set(getInitialState());
-};
-
-export const useDeleteVideoModalState = () => {
- const state = useStore($deleteVideosModalState);
- return state;
-};
-
-export const useDeleteVideoModalApi = () => {
- const store = useAppStore();
- const api = useMemo(
- () => ({
- delete: (video_ids: string[]) => deleteVideosWithDialog(video_ids, store),
- confirm: () => confirmDeletion(store),
- cancel: cancelDeletion,
- close: closeSilently,
- }),
- [store]
- );
-
- return api;
-};
diff --git a/invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx b/invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx
deleted file mode 100644
index 6ccb7e48503..00000000000
--- a/invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx
+++ /dev/null
@@ -1,63 +0,0 @@
-import type { draggable } from '@atlaskit/pragmatic-drag-and-drop/element/adapter';
-import { setCustomNativeDragPreview } from '@atlaskit/pragmatic-drag-and-drop/element/set-custom-native-drag-preview';
-import { Flex, Heading } from '@invoke-ai/ui-library';
-import type { MultipleVideoDndSourceData } from 'features/dnd/dnd';
-import { DND_IMAGE_DRAG_PREVIEW_SIZE, preserveOffsetOnSourceFallbackCentered } from 'features/dnd/util';
-import { memo } from 'react';
-import { createPortal } from 'react-dom';
-import { useTranslation } from 'react-i18next';
-import type { Param0 } from 'tsafe';
-
-const DndDragPreviewMultipleVideo = memo(({ video_ids }: { video_ids: string[] }) => {
- const { t } = useTranslation();
- return (
-
- {video_ids.length}
- {t('parameters.videos_withCount', { count: video_ids.length })}
-
- );
-});
-
-DndDragPreviewMultipleVideo.displayName = 'DndDragPreviewMultipleVideo';
-
-export type DndDragPreviewMultipleVideoState = {
- type: 'multiple-video';
- container: HTMLElement;
- video_ids: string[];
-};
-
-export const createMultipleVideoDragPreview = (arg: DndDragPreviewMultipleVideoState) =>
- createPortal(, arg.container);
-
-type SetMultipleDragPreviewArg = {
- multipleVideoDndData: MultipleVideoDndSourceData;
- setDragPreviewState: (dragPreviewState: DndDragPreviewMultipleVideoState | null) => void;
- onGenerateDragPreviewArgs: Param0['onGenerateDragPreview']>;
-};
-
-export const setMultipleVideoDragPreview = ({
- multipleVideoDndData,
- onGenerateDragPreviewArgs,
- setDragPreviewState,
-}: SetMultipleDragPreviewArg) => {
- const { nativeSetDragImage, source, location } = onGenerateDragPreviewArgs;
- setCustomNativeDragPreview({
- render({ container }) {
- setDragPreviewState({ type: 'multiple-video', container, video_ids: multipleVideoDndData.payload.video_ids });
- return () => setDragPreviewState(null);
- },
- nativeSetDragImage,
- getOffset: preserveOffsetOnSourceFallbackCentered({
- element: source.element,
- input: location.current.input,
- }),
- });
-};
diff --git a/invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx b/invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx
deleted file mode 100644
index 0fe0fcec752..00000000000
--- a/invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx
+++ /dev/null
@@ -1,69 +0,0 @@
-import type { draggable } from '@atlaskit/pragmatic-drag-and-drop/element/adapter';
-import { setCustomNativeDragPreview } from '@atlaskit/pragmatic-drag-and-drop/element/set-custom-native-drag-preview';
-import { chakra, Flex } from '@invoke-ai/ui-library';
-import type { SingleVideoDndSourceData } from 'features/dnd/dnd';
-import { DND_IMAGE_DRAG_PREVIEW_SIZE, preserveOffsetOnSourceFallbackCentered } from 'features/dnd/util';
-import { GalleryVideoPlaceholder } from 'features/gallery/components/ImageGrid/GalleryVideoPlaceholder';
-import { memo } from 'react';
-import { createPortal } from 'react-dom';
-import type { VideoDTO } from 'services/api/types';
-import type { Param0 } from 'tsafe';
-
-const ChakraImg = chakra('img');
-
-const DndDragPreviewSingleVideo = memo(({ videoDTO }: { videoDTO: VideoDTO }) => {
- return (
-
-
-
-
- );
-});
-
-DndDragPreviewSingleVideo.displayName = 'DndDragPreviewSingleVideo';
-
-export type DndDragPreviewSingleVideoState = {
- type: 'single-video';
- container: HTMLElement;
- videoDTO: VideoDTO;
-};
-
-export const createSingleVideoDragPreview = (arg: DndDragPreviewSingleVideoState) =>
- createPortal(, arg.container);
-
-type SetSingleDragPreviewArg = {
- singleVideoDndData: SingleVideoDndSourceData;
- setDragPreviewState: (dragPreviewState: DndDragPreviewSingleVideoState | null) => void;
- onGenerateDragPreviewArgs: Param0['onGenerateDragPreview']>;
-};
-
-export const setSingleVideoDragPreview = ({
- singleVideoDndData,
- onGenerateDragPreviewArgs,
- setDragPreviewState,
-}: SetSingleDragPreviewArg) => {
- const { nativeSetDragImage, source, location } = onGenerateDragPreviewArgs;
- setCustomNativeDragPreview({
- render({ container }) {
- setDragPreviewState({ type: 'single-video', container, videoDTO: singleVideoDndData.payload.videoDTO });
- return () => setDragPreviewState(null);
- },
- nativeSetDragImage,
- getOffset: preserveOffsetOnSourceFallbackCentered({
- element: source.element,
- input: location.current.input,
- }),
- });
-};
diff --git a/invokeai/frontend/web/src/features/dnd/DndImage.tsx b/invokeai/frontend/web/src/features/dnd/DndImage.tsx
index 71488500b88..2c7e4e8ad30 100644
--- a/invokeai/frontend/web/src/features/dnd/DndImage.tsx
+++ b/invokeai/frontend/web/src/features/dnd/DndImage.tsx
@@ -2,8 +2,6 @@ import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine';
import { draggable } from '@atlaskit/pragmatic-drag-and-drop/element/adapter';
import type { ImageProps, SystemStyleObject } from '@invoke-ai/ui-library';
import { Image } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $crossOrigin } from 'app/store/nanostores/authToken';
import { useAppStore } from 'app/store/storeHooks';
import { singleImageDndSource } from 'features/dnd/dnd';
import type { DndDragPreviewSingleImageState } from 'features/dnd/DndDragPreviewSingleImage';
@@ -31,7 +29,6 @@ type Props = {
export const DndImage = memo(
forwardRef(({ imageDTO, asThumbnail, ...rest }: Props, forwardedRef) => {
const store = useAppStore();
- const crossOrigin = useStore($crossOrigin);
const [isDragging, setIsDragging] = useState(false);
const ref = useRef(null);
@@ -80,7 +77,6 @@ export const DndImage = memo(
height={imageDTO.height}
sx={sx}
data-is-dragging={isDragging}
- crossOrigin={!asThumbnail ? crossOrigin : undefined}
{...rest}
/>
{dragPreviewState?.type === 'single-image' ? createSingleImageDragPreview(dragPreviewState) : null}
diff --git a/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx b/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx
index 192711c5441..0698f1c24b0 100644
--- a/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx
+++ b/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx
@@ -3,7 +3,7 @@ import { IconButton } from '@invoke-ai/ui-library';
import type { MouseEvent } from 'react';
import { memo } from 'react';
-export const imageButtonSx: SystemStyleObject = {
+const imageButtonSx: SystemStyleObject = {
minW: 0,
svg: {
transitionProperty: 'common',
diff --git a/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx b/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx
index f10b5b9d598..e5d7df68f28 100644
--- a/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx
+++ b/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx
@@ -7,12 +7,10 @@ import { Box, Flex, Heading } from '@invoke-ai/ui-library';
import { getStore } from 'app/store/nanostores/store';
import { useAppSelector } from 'app/store/storeHooks';
import { getFocusedRegion } from 'common/hooks/focus';
-import { useClientSideUpload } from 'common/hooks/useClientSideUpload';
import { setFileToPaste } from 'features/controlLayers/components/CanvasPasteModal';
import { DndDropOverlay } from 'features/dnd/DndDropOverlay';
import type { DndTargetState } from 'features/dnd/types';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
-import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect, useRef, useState } from 'react';
@@ -68,11 +66,9 @@ export const FullscreenDropzone = memo(() => {
const ref = useRef(null);
const [dndState, setDndState] = useState('idle');
const activeTab = useAppSelector(selectActiveTab);
- const isClientSideUploadEnabled = useAppSelector(selectIsClientSideUploadEnabled);
- const clientSideUpload = useClientSideUpload();
const validateAndUploadFiles = useCallback(
- async (files: File[]) => {
+ (files: File[]) => {
const { getState } = getStore();
const parseResult = z.array(zUploadFile).safeParse(files);
@@ -100,23 +96,17 @@ export const FullscreenDropzone = memo(() => {
const autoAddBoardId = selectAutoAddBoardId(getState());
- if (isClientSideUploadEnabled && files.length > 1) {
- for (const [i, file] of files.entries()) {
- await clientSideUpload(file, i);
- }
- } else {
- const uploadArgs: UploadImageArg[] = files.map((file, i) => ({
- file,
- image_category: 'user',
- is_intermediate: false,
- board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
- isFirstUploadOfBatch: i === 0,
- }));
-
- uploadImages(uploadArgs);
- }
+ const uploadArgs: UploadImageArg[] = files.map((file, i) => ({
+ file,
+ image_category: 'user',
+ is_intermediate: false,
+ board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
+ isFirstUploadOfBatch: i === 0,
+ }));
+
+ uploadImages(uploadArgs);
},
- [activeTab, t, isClientSideUploadEnabled, clientSideUpload]
+ [activeTab, t]
);
const onPaste = useCallback(
diff --git a/invokeai/frontend/web/src/features/dnd/README.md b/invokeai/frontend/web/src/features/dnd/README.md
new file mode 100644
index 00000000000..a3ab881752d
--- /dev/null
+++ b/invokeai/frontend/web/src/features/dnd/README.md
@@ -0,0 +1,41 @@
+# Drag and drop
+
+Dnd functionality is implemented with https://github.com/atlassian/pragmatic-drag-and-drop, the successor to https://github.com/atlassian/react-beautiful-dnd
+
+It uses the native HTML5 drag and drop API and is very performant, though a bit more involved to set up. The library doesn't expose a react API, but rather a set of utility functions to hook into the drag and drop events.
+
+## Implementation
+
+The core of our implementation is in invokeai/frontend/web/src/features/dnd/dnd.ts
+
+We support dragging and dropping of single or multiple images within the app. We have "dnd source" and "dnd target" abstractions.
+
+A dnd source is anything that provides the draggable payload/data. Currently, that's either an image DTO or list of image names along with their origin board.
+
+A dnd target is anything that can accept a drop of that payload. Targets have their own data. For example, a target might be a board with a board ID, or a canvas layer with a layer ID.
+
+The library has a concept of draggable elements (dnd sources), droppable elements (dnd targets), and dnd monitors. The monitors are invisible elements that track drag events and provide information about the current drag operation.
+
+The library is a bit tricky to wrap your head around, but once you understand the concepts, it's very nice to work with and super flexible.
+
+## Type safety
+
+Native drag events do not have any built-in type safety. We inject a unique symbol into the sources and targets and check that via typeguard functions. This gives us confidence that the payload is what we expect it to be and not some other data that might have been dropped from outside the app or some other source.
+
+## Defining sources and targets
+
+These are strictly typed in the dnd.ts file. Follow the examples there to define new sources and targets.
+
+Targets are more complicated - they get an isValid callback (which is called with the currently-dragged source to determine if it can accept the drop) and a handler callback (which is called when the drop is made).
+
+Both isValid and handler get the source data, target data, and the redux getState/dispatch functions. They can do whatever they need to do to determine if the drop is valid and to handle the drop.
+
+Typically the isValid function just uses the source type guard function, and the handler function dispatches one or more redux actions to update the state.
+
+## Other uses of Dnd
+
+We use the same library for other dnd things:
+
+- When dragging over some tabbed interface, hovering the tab for a moment will switch to it. See invokeai/frontend/web/src/common/hooks/useCallbackOnDragEnter.ts for a hook that implements this functionality.
+- Reordering of canvas layer lists. See invokeai/frontend/web/src/features/controlLayers/components/CanvasEntityList/CanvasEntityGroupList.tsx and invokeai/frontend/web/src/features/controlLayers/components/CanvasEntityList/useCanvasEntityListDnd.ts
+- Adding node fields to a workflow form builder and restructuring the form. This gets kinda complicated, as the form builder supports arbitrary nesting of containers with stacking of elements. See invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/dnd-hooks.ts
diff --git a/invokeai/frontend/web/src/features/dnd/dnd.ts b/invokeai/frontend/web/src/features/dnd/dnd.ts
index 0aef104869f..f5e38d4b944 100644
--- a/invokeai/frontend/web/src/features/dnd/dnd.ts
+++ b/invokeai/frontend/web/src/features/dnd/dnd.ts
@@ -9,11 +9,9 @@ import { selectComparisonImages } from 'features/gallery/components/ImageViewer/
import type { BoardId } from 'features/gallery/store/types';
import {
addImagesToBoard,
- addVideosToBoard,
createNewCanvasEntityFromImage,
newCanvasFromImage,
removeImagesFromBoard,
- removeVideosFromBoard,
replaceCanvasEntityObjectsWithImage,
setComparisonImage,
setGlobalReferenceImage,
@@ -24,10 +22,7 @@ import {
import { fieldImageCollectionValueChanged } from 'features/nodes/store/nodesSlice';
import { selectFieldInputInstanceSafe, selectNodesSlice } from 'features/nodes/store/selectors';
import { type FieldIdentifier, isImageFieldCollectionInputInstance } from 'features/nodes/types/field';
-import { startingFrameImageChanged } from 'features/parameters/store/videoSlice';
-import { expandPrompt } from 'features/prompt/PromptExpansion/expand';
-import { promptExpansionApi } from 'features/prompt/PromptExpansion/state';
-import type { ImageDTO, VideoDTO } from 'services/api/types';
+import type { ImageDTO } from 'services/api/types';
import type { JsonObject } from 'type-fest';
const log = logger('dnd');
@@ -74,34 +69,6 @@ type DndSource = {
getData: ReturnType>;
};
-//#region Single Video
-const _singleVideo = buildTypeAndKey('single-video');
-export type SingleVideoDndSourceData = DndData<
- typeof _singleVideo.type,
- typeof _singleVideo.key,
- { videoDTO: VideoDTO }
->;
-export const singleVideoDndSource: DndSource = {
- ..._singleVideo,
- typeGuard: buildTypeGuard(_singleVideo.key),
- getData: buildGetData(_singleVideo.key, _singleVideo.type),
-};
-//#endregion
-
-//#region Multiple Image
-const _multipleVideo = buildTypeAndKey('multiple-video');
-export type MultipleVideoDndSourceData = DndData<
- typeof _multipleVideo.type,
- typeof _multipleVideo.key,
- { video_ids: string[]; board_id: BoardId }
->;
-export const multipleVideoDndSource: DndSource = {
- ..._multipleVideo,
- typeGuard: buildTypeGuard(_multipleVideo.key),
- getData: buildGetData(_multipleVideo.key, _multipleVideo.type),
-};
-//#endregion
-
//#region Single Image
const _singleImage = buildTypeAndKey('single-image');
export type SingleImageDndSourceData = DndData<
@@ -475,22 +442,12 @@ export type AddImageToBoardDndTargetData = DndData<
>;
export const addImageToBoardDndTarget: DndTarget<
AddImageToBoardDndTargetData,
- SingleImageDndSourceData | MultipleImageDndSourceData | SingleVideoDndSourceData | MultipleVideoDndSourceData
+ SingleImageDndSourceData | MultipleImageDndSourceData
> = {
..._addToBoard,
typeGuard: buildTypeGuard(_addToBoard.key),
getData: buildGetData(_addToBoard.key, _addToBoard.type),
isValid: ({ sourceData, targetData }) => {
- if (singleVideoDndSource.typeGuard(sourceData)) {
- const currentBoard = sourceData.payload.videoDTO.board_id ?? 'none';
- const destinationBoard = targetData.payload.boardId;
- return currentBoard !== destinationBoard;
- }
- if (multipleVideoDndSource.typeGuard(sourceData)) {
- const currentBoard = sourceData.payload.board_id;
- const destinationBoard = targetData.payload.boardId;
- return currentBoard !== destinationBoard;
- }
if (singleImageDndSource.typeGuard(sourceData)) {
const currentBoard = sourceData.payload.imageDTO.board_id ?? 'none';
const destinationBoard = targetData.payload.boardId;
@@ -504,18 +461,6 @@ export const addImageToBoardDndTarget: DndTarget<
return false;
},
handler: ({ sourceData, targetData, dispatch }) => {
- if (singleVideoDndSource.typeGuard(sourceData)) {
- const { videoDTO } = sourceData.payload;
- const { boardId } = targetData.payload;
- addVideosToBoard({ video_ids: [videoDTO.video_id], boardId, dispatch });
- }
-
- if (multipleVideoDndSource.typeGuard(sourceData)) {
- const { video_ids } = sourceData.payload;
- const { boardId } = targetData.payload;
- addVideosToBoard({ video_ids, boardId, dispatch });
- }
-
if (singleImageDndSource.typeGuard(sourceData)) {
const { imageDTO } = sourceData.payload;
const { boardId } = targetData.payload;
@@ -541,7 +486,7 @@ export type RemoveImageFromBoardDndTargetData = DndData<
>;
export const removeImageFromBoardDndTarget: DndTarget<
RemoveImageFromBoardDndTargetData,
- SingleImageDndSourceData | MultipleImageDndSourceData | SingleVideoDndSourceData | MultipleVideoDndSourceData
+ SingleImageDndSourceData | MultipleImageDndSourceData
> = {
..._removeFromBoard,
typeGuard: buildTypeGuard(_removeFromBoard.key),
@@ -557,16 +502,6 @@ export const removeImageFromBoardDndTarget: DndTarget<
return currentBoard !== 'none';
}
- if (singleVideoDndSource.typeGuard(sourceData)) {
- const currentBoard = sourceData.payload.videoDTO.board_id ?? 'none';
- return currentBoard !== 'none';
- }
-
- if (multipleVideoDndSource.typeGuard(sourceData)) {
- const currentBoard = sourceData.payload.board_id;
- return currentBoard !== 'none';
- }
-
return false;
},
handler: ({ sourceData, dispatch }) => {
@@ -579,71 +514,9 @@ export const removeImageFromBoardDndTarget: DndTarget<
const { image_names } = sourceData.payload;
removeImagesFromBoard({ image_names, dispatch });
}
-
- if (singleVideoDndSource.typeGuard(sourceData)) {
- const { videoDTO } = sourceData.payload;
- removeVideosFromBoard({ video_ids: [videoDTO.video_id], dispatch });
- }
-
- if (multipleVideoDndSource.typeGuard(sourceData)) {
- const { video_ids } = sourceData.payload;
- removeVideosFromBoard({ video_ids, dispatch });
- }
- },
-};
-
-//#endregion
-
-//#region Prompt Generation From Image
-const _promptGenerationFromImage = buildTypeAndKey('prompt-generation-from-image');
-type PromptGenerationFromImageDndTargetData = DndData<
- typeof _promptGenerationFromImage.type,
- typeof _promptGenerationFromImage.key,
- void
->;
-export const promptGenerationFromImageDndTarget: DndTarget<
- PromptGenerationFromImageDndTargetData,
- SingleImageDndSourceData
-> = {
- ..._promptGenerationFromImage,
- typeGuard: buildTypeGuard(_promptGenerationFromImage.key),
- getData: buildGetData(_promptGenerationFromImage.key, _promptGenerationFromImage.type),
- isValid: ({ sourceData }) => {
- if (singleImageDndSource.typeGuard(sourceData)) {
- return true;
- }
- return false;
- },
- handler: ({ sourceData, dispatch, getState }) => {
- const { imageDTO } = sourceData.payload;
- promptExpansionApi.setPending(imageDTO);
- expandPrompt({ dispatch, getState, imageDTO });
},
};
-//#endregion
-//#region Video Frame From Image
-const _videoFrameFromImage = buildTypeAndKey('video-frame-from-image');
-type VideoFrameFromImageDndTargetData = DndData<
- typeof _videoFrameFromImage.type,
- typeof _videoFrameFromImage.key,
- { frame: 'start' | 'end' }
->;
-export const videoFrameFromImageDndTarget: DndTarget = {
- ..._videoFrameFromImage,
- typeGuard: buildTypeGuard(_videoFrameFromImage.key),
- getData: buildGetData(_videoFrameFromImage.key, _videoFrameFromImage.type),
- isValid: ({ sourceData }) => {
- if (singleImageDndSource.typeGuard(sourceData)) {
- return true;
- }
- return false;
- },
- handler: ({ sourceData, dispatch }) => {
- const { imageDTO } = sourceData.payload;
- dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO)));
- },
-};
//#endregion
export const dndTargets = [
@@ -659,8 +532,6 @@ export const dndTargets = [
replaceCanvasEntityObjectsWithImageDndTarget,
addImageToBoardDndTarget,
removeImageFromBoardDndTarget,
- promptGenerationFromImageDndTarget,
- videoFrameFromImageDndTarget,
] as const;
export type AnyDndTarget = (typeof dndTargets)[number];
diff --git a/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts b/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts
index 8d2aeb30e74..24d6bea1680 100644
--- a/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts
+++ b/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts
@@ -4,13 +4,7 @@ import { logger } from 'app/logging/logger';
import { getStore } from 'app/store/nanostores/store';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { parseify } from 'common/util/serialize';
-import {
- dndTargets,
- multipleImageDndSource,
- multipleVideoDndSource,
- singleImageDndSource,
- singleVideoDndSource,
-} from 'features/dnd/dnd';
+import { dndTargets, multipleImageDndSource, singleImageDndSource } from 'features/dnd/dnd';
import { useEffect } from 'react';
const log = logger('dnd');
@@ -25,12 +19,7 @@ export const useDndMonitor = () => {
const sourceData = source.data;
// Check for allowed sources
- if (
- !singleImageDndSource.typeGuard(sourceData) &&
- !multipleImageDndSource.typeGuard(sourceData) &&
- !singleVideoDndSource.typeGuard(sourceData) &&
- !multipleVideoDndSource.typeGuard(sourceData)
- ) {
+ if (!singleImageDndSource.typeGuard(sourceData) && !multipleImageDndSource.typeGuard(sourceData)) {
return false;
}
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/README.md b/invokeai/frontend/web/src/features/dynamicPrompts/README.md
new file mode 100644
index 00000000000..242a497140a
--- /dev/null
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/README.md
@@ -0,0 +1,11 @@
+# Dynamic prompts
+
+The backend API has a route to process a prompt into a list of prompts using the https://github.com/adieyal/dynamicprompts syntax
+
+In the UI, we watch the current positive prompt field for changes (debounced) and hit that route.
+
+When generating, we queue up a graph for each of the output prompts.
+
+There is a modal to show the list of generated prompts with a couple settings for prompt generation.
+
+The output prompts are stored in the redux slice for ease of consumption during graph building, but only the settings are persisted across page loads. Prompts are ephemeral.
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
index cc363c8122c..e17743207fa 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
@@ -6,13 +6,21 @@ import {
selectDynamicPromptsCombinatorial,
selectDynamicPromptsMaxPrompts,
} from 'features/dynamicPrompts/store/dynamicPromptsSlice';
-import { selectMaxPromptsConfig } from 'features/system/store/configSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 100,
+ sliderMin: 1,
+ sliderMax: 1000,
+ numberInputMin: 1,
+ numberInputMax: 10000,
+ fineStep: 1,
+ coarseStep: 10,
+};
+
const ParamDynamicPromptsMaxPrompts = () => {
const maxPrompts = useAppSelector(selectDynamicPromptsMaxPrompts);
- const config = useAppSelector(selectMaxPromptsConfig);
const combinatorial = useAppSelector(selectDynamicPromptsCombinatorial);
const dispatch = useAppDispatch();
const { t } = useTranslation();
@@ -30,18 +38,18 @@ const ParamDynamicPromptsMaxPrompts = () => {
{t('dynamicPrompts.maxPrompts')}
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx
index 011ca414f08..2fd6b18b06a 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx
@@ -9,7 +9,6 @@ import {
} from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useEffect, useMemo } from 'react';
import { utilitiesApi } from 'services/api/endpoints/utilities';
@@ -24,8 +23,6 @@ export const useDynamicPromptsWatcher = () => {
const presetModifiedPrompts = useAppSelector(selectPresetModifiedPrompts);
const maxPrompts = useAppSelector(selectDynamicPromptsMaxPrompts);
- const dynamicPrompting = useFeatureStatus('dynamicPrompting');
-
const debouncedUpdateDynamicPrompts = useMemo(
() =>
debounce(async (positivePrompt: string, maxPrompts: number) => {
@@ -55,10 +52,6 @@ export const useDynamicPromptsWatcher = () => {
);
useEffect(() => {
- if (!dynamicPrompting) {
- return;
- }
-
// Before we execute, imperatively check the dynamic prompts query cache to see if we have already fetched this prompt
const state = getState();
@@ -88,5 +81,5 @@ export const useDynamicPromptsWatcher = () => {
}
debouncedUpdateDynamicPrompts(presetModifiedPrompts.positive, maxPrompts);
- }, [debouncedUpdateDynamicPrompts, dispatch, dynamicPrompting, getState, maxPrompts, presetModifiedPrompts]);
+ }, [debouncedUpdateDynamicPrompts, dispatch, getState, maxPrompts, presetModifiedPrompts]);
};
diff --git a/invokeai/frontend/web/src/features/gallery/README.md b/invokeai/frontend/web/src/features/gallery/README.md
new file mode 100644
index 00000000000..8d9bad8fa69
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/README.md
@@ -0,0 +1,77 @@
+# Gallery Overview
+
+The gallery renders a scrollable grid of images. The image sizes adapt to the viewport size, and the user can scroll to any part of their gallery. It supports keyboard navigation, multi-select and context menus. Images can be dragged from the gallery to use them in other parts of the app (they are not removed from the gallery).
+
+There is some basic ordering and searching support.
+
+## Boards
+
+Boards act as folders for images.
+
+- Users can create any number of boards.
+- Each image can be assigned to at most one board.
+- There is a default "no board" board, labeled "Uncategorized".
+- User-created boards can be deleted. The no-board board cannot be deleted.
+- When deleting a board, users can choose to either delete all images in the board, or move them to the no-board board.
+- User-created boards can be renamed. The no-board board cannot be renamed.
+- Boards cannot be nested.
+- Boards can be archived, which hides them from the board list.
+- There is no way to show all images at once. The gallery view always shows images for a specific board.
+- Boards can be selected to show their images in the panel below the boards list; the gallery grid.
+- Boards can be set as the "auto-add" board. New images will be added to this board as they are generated.
+
+## Image viewer
+
+Clicking an image in the gallery opens it in the image viewer, which presents a larger view of the image, along with a variety of image actions.
+
+The image viewer is rendered in one of the main/center panel tabs.
+
+### Image actions
+
+A handful of common actions are available as buttons in the image viewer header, matching the context menu actions.
+
+See invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md
+
+### Progress viewer
+
+During generation, we might get "progress images" showing a low-res version of the image at each step in the denoising process. If these are available, the user can open a progress viewer overlay to see the image at each step.
+
+Socket subscriptions and related logic for handling progress images are in the image viewer context. See invokeai/frontend/web/src/features/gallery/components/ImageViewer/context.tsx
+
+### Metadata viewer
+
+The user can enable a metadata overlay to view the image metadata. This is rendered as a semi-transparent overlay on top of the image.
+
+"Metadata" refers to key-value pairs of various settings. For example, the prompt, number of steps and model used to generate the image. This metadata is embedded into the image file itself, but also stored in the database for searching and filtering.
+
+Images also have the execution graph embedded in them. This isn't stored in the database, as it can be large and complex. Instead, we extract it from the image when needed.
+
+Metadata can be recalled, and the graph can be loaded into the workflow editor.
+
+### Image comparison
+
+Users can hold Alt when clicking an image in the gallery to select it as the "comparison" image. The comparison image is shown alongside the current image in the image viewer with a couple modes (slider, side-by-side, hover-to-swap).
+
+## Data fetching
+
+The gallery uses a windowed list to only render the images that are currently visible in the viewport.
+
+It starts by loading a list of all image names for the selected board or view settings. react-virtuoso reports on the currently-visible range of images (plus some "overscan"). We then fetch the full image DTOs only for those images, which are cached by RTK Query. As the user scrolls, the visible range changes and we fetch more image DTOs as needed.
+
+This affords a nice UX, where the user can scroll to any part of their gallery. The scrollbar size never changes.
+
+We've tried some other approaches in the past, but they all had significant UX or implementation issues:
+
+### Infinite scroll
+
+Load an initial chunk of images, then load more as the user scrolls to the bottom.
+
+The scrollbar continually shrinks as more images are loaded.
+
+This yields a poor UX, as the user cannot easily scroll to a specific part of their gallery. It's also pretty complicated to implement within RTK Query, though since we switched, RTK Query now supports infinite queries. It might be easier to do this today.
+
+### Traditional pagination
+
+Show a fixed number of images per page, with pagination controls.
+
+This is a poor UX, as the user cannot easily scroll to a specific part of their gallery. Galleries are often very large, and the page size changes depending on the viewport size. The gallery is also constantly inserting new images at the top of the list, which means we are constantly invalidating the current page's query cache and the page numbers are not stable.
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
index 721792dbd54..5cc25f6c038 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
@@ -5,7 +5,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { $boardToDelete } from 'features/gallery/components/Boards/DeleteBoardModal';
import { selectAutoAddBoardId, selectAutoAssignBoardOnClick } from 'features/gallery/store/gallerySelectors';
import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { toast } from 'features/toast/toast';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -33,7 +32,6 @@ const BoardContextMenu = ({ board, children }: Props) => {
const isSelectedForAutoAdd = useAppSelector(selectIsSelectedForAutoAdd);
const boardName = useBoardName(board.board_id);
- const isBulkDownloadEnabled = useFeatureStatus('bulkDownload');
const [bulkDownload] = useBulkDownloadImagesMutation();
@@ -79,11 +77,10 @@ const BoardContextMenu = ({ board, children }: Props) => {
{isSelectedForAutoAdd ? t('boards.selectedForAutoAdd') : t('boards.menuItemAutoAdd')}
)}
- {isBulkDownloadEnabled && (
- } onClickCapture={handleBulkDownload}>
- {t('boards.downloadBoard')}
-
- )}
+
+ } onClickCapture={handleBulkDownload}>
+ {t('boards.downloadBoard')}
+
{board.archived && (
} onClick={handleUnarchive}>
@@ -109,7 +106,6 @@ const BoardContextMenu = ({ board, children }: Props) => {
isSelectedForAutoAdd,
handleSetAutoAdd,
t,
- isBulkDownloadEnabled,
handleBulkDownload,
board.archived,
handleUnarchive,
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx
index 6a9e51cb74c..9f59e60fee8 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx
@@ -1,47 +1,32 @@
import { IconButton } from '@invoke-ai/ui-library';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { useAppDispatch } from 'app/store/storeHooks';
import { boardIdSelected, boardSearchTextChanged } from 'features/gallery/store/gallerySlice';
-import { selectAllowPrivateBoards } from 'features/system/store/configSelectors';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
import { useCreateBoardMutation } from 'services/api/endpoints/boards';
-type Props = {
- isPrivateBoard: boolean;
-};
-
-const AddBoardButton = ({ isPrivateBoard }: Props) => {
+const AddBoardButton = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
- const allowPrivateBoards = useAppSelector(selectAllowPrivateBoards);
const [createBoard, { isLoading }] = useCreateBoardMutation();
- const label = useMemo(() => {
- if (!allowPrivateBoards) {
- return t('boards.addBoard');
- }
- if (isPrivateBoard) {
- return t('boards.addPrivateBoard');
- }
- return t('boards.addSharedBoard');
- }, [allowPrivateBoards, isPrivateBoard, t]);
const handleCreateBoard = useCallback(async () => {
try {
- const board = await createBoard({ board_name: t('boards.myBoard'), is_private: isPrivateBoard }).unwrap();
+ const board = await createBoard({ board_name: t('boards.myBoard') }).unwrap();
dispatch(boardIdSelected({ boardId: board.board_id }));
dispatch(boardSearchTextChanged(''));
} catch {
//no-op
}
- }, [t, createBoard, isPrivateBoard, dispatch]);
+ }, [t, createBoard, dispatch]);
return (
}
isLoading={isLoading}
- tooltip={label}
- aria-label={label}
+ tooltip={t('boards.addBoard')}
+ aria-label={t('boards.addBoard')}
onClick={handleCreateBoard}
size="md"
data-testid="add-board-button"
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx
index ce524bac801..8877b22612f 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx
@@ -1,6 +1,5 @@
import { Flex, Image, Text } from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useTranslation } from 'react-i18next';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import type { BoardDTO } from 'services/api/types';
@@ -10,13 +9,11 @@ type Props = {
boardCounts: {
image_count: number;
asset_count: number;
- video_count: number;
};
};
export const BoardTooltip = ({ board, boardCounts }: Props) => {
const { t } = useTranslation();
- const isVideoEnabled = useFeatureStatus('video');
const { currentData: coverImage } = useGetImageDTOQuery(board?.cover_image_name ?? skipToken);
@@ -39,7 +36,6 @@ export const BoardTooltip = ({ board, boardCounts }: Props) => {
{t('boards.imagesWithCount', { count: boardCounts.image_count })},{' '}
{t('boards.assetsWithCount', { count: boardCounts.asset_count })}
- {isVideoEnabled && {t('boards.videosWithCount', { count: boardCounts.video_count })}}
{board?.archived && ({t('boards.archived')})}
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx
index 3b48882b5c5..2d37a03f69f 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx
@@ -1,4 +1,4 @@
-import { Button, Collapse, Flex, Icon, Text, useDisclosure } from '@invoke-ai/ui-library';
+import { Collapse, Flex, Text, useDisclosure } from '@invoke-ai/ui-library';
import { EMPTY_ARRAY } from 'app/store/constants';
import { useAppSelector } from 'app/store/storeHooks';
import { fixTooltipCloseOnScrollStyles } from 'common/util/fixTooltipCloseOnScrollStyles';
@@ -7,50 +7,38 @@ import {
selectListBoardsQueryArgs,
selectSelectedBoardId,
} from 'features/gallery/store/gallerySelectors';
-import { selectAllowPrivateBoards } from 'features/system/store/configSelectors';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import { PiCaretDownBold } from 'react-icons/pi';
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
import AddBoardButton from './AddBoardButton';
import GalleryBoard from './GalleryBoard';
import NoBoardBoard from './NoBoardBoard';
-type Props = {
- isPrivate: boolean;
-};
-
-export const BoardsList = memo(({ isPrivate }: Props) => {
+export const BoardsList = memo(() => {
const { t } = useTranslation();
const selectedBoardId = useAppSelector(selectSelectedBoardId);
const boardSearchText = useAppSelector(selectBoardSearchText);
const queryArgs = useAppSelector(selectListBoardsQueryArgs);
const { data: boards } = useListAllBoardsQuery(queryArgs);
- const allowPrivateBoards = useAppSelector(selectAllowPrivateBoards);
- const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true });
+ const { isOpen } = useDisclosure({ defaultIsOpen: true });
const filteredBoards = useMemo(() => {
if (!boards) {
return EMPTY_ARRAY;
}
- return boards.filter((board) => {
- if (boardSearchText.length) {
- return board.is_private === isPrivate && board.board_name.toLowerCase().includes(boardSearchText.toLowerCase());
- } else {
- return board.is_private === isPrivate;
- }
- });
- }, [boardSearchText, boards, isPrivate]);
+ if (boardSearchText.length) {
+ return boards.filter((board) => board.board_name.toLowerCase().includes(boardSearchText.toLowerCase()));
+ }
+
+ return boards;
+ }, [boardSearchText, boards]);
const boardElements = useMemo(() => {
const elements = [];
- if (allowPrivateBoards && isPrivate && !boardSearchText.length) {
- elements.push();
- }
- if (!allowPrivateBoards && !boardSearchText.length) {
+ if (!boardSearchText.length) {
elements.push();
}
@@ -61,15 +49,7 @@ export const BoardsList = memo(({ isPrivate }: Props) => {
});
return elements;
- }, [allowPrivateBoards, isPrivate, boardSearchText.length, filteredBoards, selectedBoardId]);
-
- const boardListTitle = useMemo(() => {
- if (allowPrivateBoards) {
- return isPrivate ? t('boards.private') : t('boards.shared');
- } else {
- return t('boards.boards');
- }
- }, [isPrivate, allowPrivateBoards, t]);
+ }, [boardSearchText.length, filteredBoards, selectedBoardId]);
return (
@@ -84,26 +64,10 @@ export const BoardsList = memo(({ isPrivate }: Props) => {
top={0}
bg="base.900"
>
- {allowPrivateBoards ? (
-
- ) : (
-
- {boardListTitle}
-
- )}
-
+
+ {t('boards.boards')}
+
+
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx
index 4b6c4030205..e7fa512d067 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx
@@ -2,9 +2,7 @@ import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine';
import { autoScrollForElements } from '@atlaskit/pragmatic-drag-and-drop-auto-scroll/element';
import { autoScrollForExternal } from '@atlaskit/pragmatic-drag-and-drop-auto-scroll/external';
import { Box } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
-import { selectAllowPrivateBoards } from 'features/system/store/configSelectors';
import type { OverlayScrollbarsComponentRef } from 'overlayscrollbars-react';
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
import type { CSSProperties } from 'react';
@@ -18,7 +16,6 @@ const overlayScrollbarsStyles: CSSProperties = {
};
export const BoardsListWrapper = memo(() => {
- const allowPrivateBoards = useAppSelector(selectAllowPrivateBoards);
const [os, osRef] = useState(null);
useEffect(() => {
const osInstance = os?.osInstance();
@@ -48,8 +45,7 @@ export const BoardsListWrapper = memo(() => {
style={overlayScrollbarsStyles}
options={overlayScrollbarsParams.options}
>
- {allowPrivateBoards && }
-
+
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx
index 772606ec86f..1ddc4b0db36 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx
@@ -15,7 +15,6 @@ import {
selectSelectedBoardId,
} from 'features/gallery/store/gallerySelectors';
import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArchiveBold, PiImageSquare } from 'react-icons/pi';
@@ -37,7 +36,6 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => {
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick);
const selectedBoardId = useAppSelector(selectSelectedBoardId);
- const isVideoEnabled = useFeatureStatus('video');
const onClick = useCallback(() => {
if (selectedBoardId !== board.board_id) {
dispatch(boardIdSelected({ boardId: board.board_id }));
@@ -56,7 +54,6 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => {
() => ({
image_count: board.image_count,
asset_count: board.asset_count,
- video_count: board.video_count,
}),
[board]
);
@@ -95,8 +92,7 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => {
{board.archived && }
- {board.image_count} | {isVideoEnabled && `${board.video_count} | `}
- {board.asset_count}
+ {board.image_count} | {board.asset_count}
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx
index 22ecb71ae8e..900799f8ed7 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx
@@ -13,14 +13,9 @@ import {
selectBoardSearchText,
} from 'features/gallery/store/gallerySelectors';
import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import {
- useGetBoardAssetsTotalQuery,
- useGetBoardImagesTotalQuery,
- useGetBoardVideosTotalQuery,
-} from 'services/api/endpoints/boards';
+import { useGetBoardAssetsTotalQuery, useGetBoardImagesTotalQuery } from 'services/api/endpoints/boards';
import { useBoardName } from 'services/api/hooks/useBoardName';
interface Props {
@@ -33,7 +28,6 @@ const _hover: SystemStyleObject = {
const NoBoardBoard = memo(({ isSelected }: Props) => {
const dispatch = useAppDispatch();
- const isVideoEnabled = useFeatureStatus('video');
const { imagesTotal } = useGetBoardImagesTotalQuery('none', {
selectFromResult: ({ data }) => {
return { imagesTotal: data?.total ?? 0 };
@@ -44,12 +38,6 @@ const NoBoardBoard = memo(({ isSelected }: Props) => {
return { assetsTotal: data?.total ?? 0 };
},
});
- const { videoTotal } = useGetBoardVideosTotalQuery('none', {
- skip: !isVideoEnabled,
- selectFromResult: ({ data }) => {
- return { videoTotal: data?.total ?? 0 };
- },
- });
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick);
const boardSearchText = useAppSelector(selectBoardSearchText);
@@ -74,12 +62,7 @@ const NoBoardBoard = memo(({ isSelected }: Props) => {
{(ref) => (
- }
+ label={}
openDelay={1000}
placement="right"
closeOnScroll
@@ -120,8 +103,7 @@ const NoBoardBoard = memo(({ isSelected }: Props) => {
{autoAddBoardId === 'none' && }
- {imagesTotal} | {isVideoEnabled && `${videoTotal} | `}
- {assetsTotal}
+ {imagesTotal} | {assetsTotal}
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx
index 47d59540f1d..b7d99301e7a 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx
@@ -151,13 +151,7 @@ const DeleteBoardModal = () => {
bottomMessage={t('boards.bottomMessage')}
/>
)}
- {boardToDelete !== 'none' && (
-
- {boardToDelete.is_private
- ? t('boards.deletedPrivateBoardsCannotbeRestored')
- : t('boards.deletedBoardsCannotbeRestored')}
-
- )}
+ {boardToDelete !== 'none' && {t('boards.deletedBoardsCannotbeRestored')}}
{t('gallery.deleteImagePermanent')}
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx
index d4c71580836..b77cfb05ea0 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx
@@ -4,7 +4,6 @@ import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId, selectAutoAssignBoardOnClick } from 'features/gallery/store/gallerySelectors';
import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
@@ -23,7 +22,6 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
const dispatch = useAppDispatch();
const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick);
const isSelectedForAutoAdd = useAppSelector(selectIsSelectedForAutoAdd);
- const isBulkDownloadEnabled = useFeatureStatus('bulkDownload');
const [bulkDownload] = useBulkDownloadImagesMutation();
@@ -48,11 +46,9 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
{isSelectedForAutoAdd ? t('boards.selectedForAutoAdd') : t('boards.menuItemAutoAdd')}
)}
- {isBulkDownloadEnabled && (
- } onClickCapture={handleBulkDownload}>
- {t('boards.downloadBoard')}
-
- )}
+ } onClickCapture={handleBulkDownload}>
+ {t('boards.downloadBoard')}
+
}
@@ -68,7 +64,6 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
autoAssignBoardOnClick,
handleBulkDownload,
handleSetAutoAdd,
- isBulkDownloadEnabled,
isSelectedForAutoAdd,
t,
setUncategorizedImagesAsToBeDeleted,
diff --git a/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx b/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx
index f414df82f1e..fa93c597f3e 100644
--- a/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx
@@ -5,7 +5,6 @@ import { useDisclosure } from 'common/hooks/useBoolean';
import { BoardsListWrapper } from 'features/gallery/components/Boards/BoardsList/BoardsListWrapper';
import { BoardsSearch } from 'features/gallery/components/Boards/BoardsList/BoardsSearch';
import { BoardsSettingsPopover } from 'features/gallery/components/Boards/BoardsSettingsPopover';
-import { GalleryHeader } from 'features/gallery/components/GalleryHeader';
import { selectBoardSearchText } from 'features/gallery/store/gallerySelectors';
import { boardSearchTextChanged } from 'features/gallery/store/gallerySlice';
import { useAutoLayoutContext } from 'features/ui/layouts/auto-layout-context';
@@ -62,9 +61,6 @@ export const BoardsPanel = memo(() => {
{t('boards.boards')}
-
-
-
{
const { t } = useTranslation();
const dispatch = useAppDispatch();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- dispatch(imagesToChangeSelected([itemDTO.image_name]));
- } else {
- dispatch(videosToChangeSelected([itemDTO.video_id]));
- }
+ dispatch(imagesToChangeSelected([imageDTO.image_name]));
dispatch(isModalOpenChanged(true));
- }, [dispatch, itemDTO]);
+ }, [dispatch, imageDTO]);
return (
} onClickCapture={onClick}>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx
index 35608d6ddee..ba94e3891b6 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx
@@ -1,23 +1,18 @@
import { IconMenuItem } from 'common/components/IconMenuItem';
import { useCopyImageToClipboard } from 'common/hooks/useCopyImageToClipboard';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCopyBold } from 'react-icons/pi';
-import { isImageDTO } from 'services/api/types';
export const ContextMenuItemCopy = memo(() => {
const { t } = useTranslation();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const copyImageToClipboard = useCopyImageToClipboard();
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- copyImageToClipboard(itemDTO.image_url);
- } else {
- // copyVideoToClipboard(itemDTO.video_url);
- }
- }, [copyImageToClipboard, itemDTO]);
+ copyImageToClipboard(imageDTO.image_url);
+ }, [copyImageToClipboard, imageDTO]);
return (
{
const { t } = useTranslation();
const deleteImageModal = useDeleteImageModalApi();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const onClick = useCallback(async () => {
try {
- if (isImageDTO(itemDTO)) {
- await deleteImageModal.delete([itemDTO.image_name]);
- }
+ await deleteImageModal.delete([imageDTO.image_name]);
} catch {
// noop;
}
- }, [deleteImageModal, itemDTO]);
+ }, [deleteImageModal, imageDTO]);
return (
{
- const { t } = useTranslation();
- const deleteVideoModal = useDeleteVideoModalApi();
- const itemDTO = useItemDTOContext();
-
- const onClick = useCallback(async () => {
- try {
- if (isVideoDTO(itemDTO)) {
- await deleteVideoModal.delete([itemDTO.video_id]);
- }
- } catch {
- // noop;
- }
- }, [deleteVideoModal, itemDTO]);
-
- return (
- }
- onClickCapture={onClick}
- aria-label={t('gallery.deleteVideo', { count: 1 })}
- tooltip={t('gallery.deleteVideo', { count: 1 })}
- isDestructive
- />
- );
-});
-
-ContextMenuItemDeleteVideo.displayName = 'ContextMenuItemDeleteVideo';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx
index b53ba238910..723d806f484 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx
@@ -1,23 +1,18 @@
import { IconMenuItem } from 'common/components/IconMenuItem';
import { useDownloadItem } from 'common/hooks/useDownloadImage';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadSimpleBold } from 'react-icons/pi';
-import { isImageDTO } from 'services/api/types';
export const ContextMenuItemDownload = memo(() => {
const { t } = useTranslation();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const { downloadItem } = useDownloadItem();
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- downloadItem(itemDTO.image_url, itemDTO.image_name);
- } else {
- downloadItem(itemDTO.video_url, itemDTO.video_id);
- }
- }, [downloadItem, itemDTO]);
+ downloadItem(imageDTO.image_url, imageDTO.image_name);
+ }, [downloadItem, imageDTO]);
return (
{
const { t } = useTranslation();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const loadWorkflowWithDialog = useLoadWorkflowWithDialog();
const hasTemplates = useStore($hasTemplates);
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- loadWorkflowWithDialog({ type: 'image', data: itemDTO.image_name });
- } else {
- // loadWorkflowWithDialog({ type: 'video', data: itemDTO.video_id });
- }
- }, [loadWorkflowWithDialog, itemDTO]);
+ loadWorkflowWithDialog({ type: 'image', data: imageDTO.image_name });
+ }, [loadWorkflowWithDialog, imageDTO]);
const isDisabled = useMemo(() => {
- if (isImageDTO(itemDTO)) {
- return !itemDTO.has_workflow || !hasTemplates;
- }
- return false;
- }, [itemDTO, hasTemplates]);
+ return !imageDTO.has_workflow || !hasTemplates;
+ }, [imageDTO, hasTemplates]);
return (
} onClickCapture={onClick} isDisabled={isDisabled}>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx
index 8055e592cfe..0a557710975 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx
@@ -1,6 +1,6 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -10,48 +10,33 @@ import { memo, useCallback, useMemo } from 'react';
import { flushSync } from 'react-dom';
import { useTranslation } from 'react-i18next';
import { PiCrosshairBold } from 'react-icons/pi';
-import { isImageDTO } from 'services/api/types';
export const ContextMenuItemLocateInGalery = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const activeTab = useAppSelector(selectActiveTab);
const galleryPanel = useGalleryPanel(activeTab);
const isGalleryImage = useMemo(() => {
- return !itemDTO.is_intermediate;
- }, [itemDTO]);
+ return !imageDTO.is_intermediate;
+ }, [imageDTO]);
const onClick = useCallback(() => {
navigationApi.expandRightPanel();
galleryPanel.expand();
- if (isImageDTO(itemDTO)) {
- flushSync(() => {
- dispatch(
- boardIdSelected({
- boardId: itemDTO.board_id ?? 'none',
- select: {
- selection: [{ type: 'image', id: itemDTO.image_name }],
- galleryView: IMAGE_CATEGORIES.includes(itemDTO.image_category) ? 'images' : 'assets',
- },
- })
- );
- });
- } else {
- flushSync(() => {
- dispatch(
- boardIdSelected({
- boardId: itemDTO.board_id ?? 'none',
- select: {
- selection: [{ type: 'video', id: itemDTO.video_id }],
- galleryView: 'videos',
- },
- })
- );
- });
- }
- }, [dispatch, galleryPanel, itemDTO]);
+ flushSync(() => {
+ dispatch(
+ boardIdSelected({
+ boardId: imageDTO.board_id ?? 'none',
+ select: {
+ selection: [imageDTO.image_name],
+ galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 'images' : 'assets',
+ },
+ })
+ );
+ });
+ }, [dispatch, galleryPanel, imageDTO]);
return (
} onClickCapture={onClick} isDisabled={!isGalleryImage}>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx
index b4c30ce3dc7..23d21abc68a 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx
@@ -1,6 +1,6 @@
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useRecallAll } from 'features/gallery/hooks/useRecallAllImageMetadata';
import { useRecallCLIPSkip } from 'features/gallery/hooks/useRecallCLIPSkip';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
@@ -17,21 +17,19 @@ import {
PiQuotesBold,
PiRulerBold,
} from 'react-icons/pi';
-import type { ImageDTO } from 'services/api/types';
export const ContextMenuItemMetadataRecallActionsCanvasGenerateTabs = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
- // TODO: Implement video recall metadata actions
- const recallAll = useRecallAll(itemDTO as ImageDTO);
- const recallRemix = useRecallRemix(itemDTO as ImageDTO);
- const recallPrompts = useRecallPrompts(itemDTO as ImageDTO);
- const recallSeed = useRecallSeed(itemDTO as ImageDTO);
- const recallDimensions = useRecallDimensions(itemDTO as ImageDTO);
- const recallCLIPSkip = useRecallCLIPSkip(itemDTO as ImageDTO);
+ const recallAll = useRecallAll(imageDTO);
+ const recallRemix = useRecallRemix(imageDTO);
+ const recallPrompts = useRecallPrompts(imageDTO);
+ const recallSeed = useRecallSeed(imageDTO);
+ const recallDimensions = useRecallDimensions(imageDTO);
+ const recallCLIPSkip = useRecallCLIPSkip(imageDTO);
return (
}>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx
index d3511e29e0b..d4aa0a4296b 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx
@@ -1,22 +1,20 @@
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowBendUpLeftBold, PiPlantBold, PiQuotesBold } from 'react-icons/pi';
-import type { ImageDTO } from 'services/api/types';
export const ContextMenuItemMetadataRecallActionsUpscaleTab = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
- // TODO: Implement video recall metadata actions
- const recallPrompts = useRecallPrompts(itemDTO as ImageDTO);
- const recallSeed = useRecallSeed(itemDTO as ImageDTO);
+ const recallPrompts = useRecallPrompts(imageDTO);
+ const recallSeed = useRecallSeed(imageDTO);
return (
}>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx
index b40525ae474..4aa8f9bb3e4 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx
@@ -3,7 +3,7 @@ import { useAppStore } from 'app/store/storeHooks';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useCanvasIsBusySafe } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
-import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { newCanvasFromImage } from 'features/imageActions/actions';
import { toast } from 'features/toast/toast';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -16,7 +16,7 @@ export const ContextMenuItemNewCanvasFromImageSubMenu = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
const store = useAppStore();
- const imageDTO = useItemDTOContextImageOnly();
+ const imageDTO = useImageDTOContext();
const isBusy = useCanvasIsBusySafe();
const isStaging = useCanvasIsStaging();
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx
index 710a381d937..0bc680c7fee 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx
@@ -3,8 +3,7 @@ import { useAppStore } from 'app/store/storeHooks';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { NewLayerIcon } from 'features/controlLayers/components/common/icons';
import { useCanvasIsBusySafe } from 'features/controlLayers/hooks/useCanvasIsBusy';
-import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
-import { sentImageToCanvas } from 'features/gallery/store/actions';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { createNewCanvasEntityFromImage } from 'features/imageActions/actions';
import { toast } from 'features/toast/toast';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -17,14 +16,13 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
const store = useAppStore();
- const imageDTO = useItemDTOContextImageOnly();
+ const imageDTO = useImageDTOContext();
const isBusy = useCanvasIsBusySafe();
const onClickNewRasterLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
createNewCanvasEntityFromImage({ imageDTO, type: 'raster_layer', dispatch, getState });
- dispatch(sentImageToCanvas());
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -36,7 +34,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
createNewCanvasEntityFromImage({ imageDTO, type: 'control_layer', dispatch, getState });
- dispatch(sentImageToCanvas());
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -48,7 +45,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
createNewCanvasEntityFromImage({ imageDTO, type: 'inpaint_mask', dispatch, getState });
- dispatch(sentImageToCanvas());
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -60,7 +56,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
createNewCanvasEntityFromImage({ imageDTO, type: 'regional_guidance', dispatch, getState });
- dispatch(sentImageToCanvas());
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
@@ -72,7 +67,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => {
const { dispatch, getState } = store;
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
createNewCanvasEntityFromImage({ imageDTO, type: 'regional_guidance_with_reference_image', dispatch, getState });
- dispatch(sentImageToCanvas());
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx
index e2b9f42363b..80e99fbfb7a 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx
@@ -1,24 +1,15 @@
-import { useAppDispatch } from 'app/store/storeHooks';
import { IconMenuItem } from 'common/components/IconMenuItem';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
-import { imageOpenedInNewTab } from 'features/gallery/store/actions';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowSquareOutBold } from 'react-icons/pi';
-import { isImageDTO } from 'services/api/types';
export const ContextMenuItemOpenInNewTab = memo(() => {
const { t } = useTranslation();
- const itemDTO = useItemDTOContext();
- const dispatch = useAppDispatch();
+ const imageDTO = useImageDTOContext();
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- window.open(itemDTO.image_url, '_blank');
- dispatch(imageOpenedInNewTab());
- } else {
- window.open(itemDTO.video_url, '_blank');
- }
- }, [itemDTO, dispatch]);
+ window.open(imageDTO.image_url, '_blank');
+ }, [imageDTO]);
return (
{
const dispatch = useAppDispatch();
const { t } = useTranslation();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- dispatch(imageToCompareChanged(null));
- dispatch(itemSelected({ type: 'image', id: itemDTO.image_name }));
- navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID);
- } else {
- // TODO: Implement video open in viewer
- }
- }, [dispatch, itemDTO]);
+ dispatch(imageToCompareChanged(null));
+ dispatch(imageSelected(imageDTO.image_name));
+ navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID);
+ }, [dispatch, imageDTO]);
return (
{
const { t } = useTranslation();
const dispatch = useAppDispatch();
- const itemDTO = useItemDTOContext();
+ const imageDTO = useImageDTOContext();
const selectMaySelectForCompare = useMemo(
() =>
createSelector(selectGallerySlice, (gallery) => {
- if (isImageDTO(itemDTO)) {
- return gallery.imageToCompare !== itemDTO.image_name;
- }
- return false;
+ return gallery.imageToCompare !== imageDTO.image_name;
}),
- [itemDTO]
+ [imageDTO]
);
const maySelectForCompare = useAppSelector(selectMaySelectForCompare);
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- dispatch(imageToCompareChanged(itemDTO.image_name));
- } else {
- // TODO: Implement video select for compare
- }
- }, [dispatch, itemDTO]);
+ dispatch(imageToCompareChanged(imageDTO.image_name));
+ }, [dispatch, imageDTO]);
return (
{
const { t } = useTranslation();
const dispatch = useAppDispatch();
- const imageDTO = useItemDTOContextImageOnly();
+ const imageDTO = useImageDTOContext();
const handleSendToCanvas = useCallback(() => {
dispatch(upscaleInitialImageChanged(imageDTOToImageWithDims(imageDTO)));
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx
deleted file mode 100644
index b59f0addd7d..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import { MenuItem } from '@invoke-ai/ui-library';
-import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
-import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
-import { startingFrameImageChanged } from 'features/parameters/store/videoSlice';
-import { navigationApi } from 'features/ui/layouts/navigation-api';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiVideoBold } from 'react-icons/pi';
-import { useDispatch } from 'react-redux';
-
-export const ContextMenuItemSendToVideo = memo(() => {
- const { t } = useTranslation();
- const imageDTO = useItemDTOContextImageOnly();
- const dispatch = useDispatch();
-
- const onClick = useCallback(() => {
- dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO)));
- navigationApi.switchToTab('video');
- }, [imageDTO, dispatch]);
-
- return (
- } onClickCapture={onClick} aria-label={t('parameters.sendToVideo')}>
- {t('parameters.sendToVideo')}
-
- );
-});
-
-ContextMenuItemSendToVideo.displayName = 'ContextMenuItemSendToVideo';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx
index 4828fb5e9ce..b6e465db463 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx
@@ -1,50 +1,35 @@
import { MenuItem } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $customStarUI } from 'app/store/nanostores/customStarUI';
-import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiStarBold, PiStarFill } from 'react-icons/pi';
import { useStarImagesMutation, useUnstarImagesMutation } from 'services/api/endpoints/images';
-import { useStarVideosMutation, useUnstarVideosMutation } from 'services/api/endpoints/videos';
-import { isImageDTO, isVideoDTO } from 'services/api/types';
export const ContextMenuItemStarUnstar = memo(() => {
const { t } = useTranslation();
- const itemDTO = useItemDTOContext();
- const customStarUi = useStore($customStarUI);
+ const imageDTO = useImageDTOContext();
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();
- const [starVideos] = useStarVideosMutation();
- const [unstarVideos] = useUnstarVideosMutation();
const starImage = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- starImages({ image_names: [itemDTO.image_name] });
- } else if (isVideoDTO(itemDTO)) {
- starVideos({ video_ids: [itemDTO.video_id] });
- }
- }, [starImages, itemDTO, starVideos]);
+ starImages({ image_names: [imageDTO.image_name] });
+ }, [starImages, imageDTO]);
const unstarImage = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- unstarImages({ image_names: [itemDTO.image_name] });
- } else if (isVideoDTO(itemDTO)) {
- unstarVideos({ video_ids: [itemDTO.video_id] });
- }
- }, [unstarImages, itemDTO, unstarVideos]);
+ unstarImages({ image_names: [imageDTO.image_name] });
+ }, [unstarImages, imageDTO]);
- if (itemDTO.starred) {
+ if (imageDTO.starred) {
return (
- } onClickCapture={unstarImage}>
- {customStarUi ? customStarUi.off.text : t('gallery.unstarImage')}
+ } onClickCapture={unstarImage}>
+ {t('gallery.unstarImage')}
);
}
return (
- } onClickCapture={starImage}>
- {customStarUi ? customStarUi.on.text : t('gallery.starImage')}
+ } onClickCapture={starImage}>
+ {t('gallery.starImage')}
);
});
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx
index ae526243fe5..188be4c307d 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx
@@ -1,5 +1,5 @@
import { MenuItem } from '@invoke-ai/ui-library';
-import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useCreateStylePresetFromMetadata } from 'features/gallery/hooks/useCreateStylePresetFromMetadata';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -7,7 +7,7 @@ import { PiPaintBrushBold } from 'react-icons/pi';
export const ContextMenuItemUseAsPromptTemplate = memo(() => {
const { t } = useTranslation();
- const imageDTO = useItemDTOContextImageOnly();
+ const imageDTO = useImageDTOContext();
const stylePreset = useCreateStylePresetFromMetadata(imageDTO);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx
index 41505ae81d5..918d2850342 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx
@@ -3,7 +3,7 @@ import { useAppStore } from 'app/store/storeHooks';
import { getDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
import { refImageAdded } from 'features/controlLayers/store/refImagesSlice';
import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
-import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
+import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { toast } from 'features/toast/toast';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -12,7 +12,7 @@ import { PiImageBold } from 'react-icons/pi';
export const ContextMenuItemUseAsRefImage = memo(() => {
const { t } = useTranslation();
const store = useAppStore();
- const imageDTO = useItemDTOContextImageOnly();
+ const imageDTO = useImageDTOContext();
const onClickNewGlobalReferenceImageFromImage = useCallback(() => {
const { dispatch, getState } = store;
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx
deleted file mode 100644
index 12cbb22f9c9..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx
+++ /dev/null
@@ -1,46 +0,0 @@
-import { MenuItem } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { useAppSelector, useAppStore } from 'app/store/storeHooks';
-import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
-import { expandPrompt } from 'features/prompt/PromptExpansion/expand';
-import { promptExpansionApi } from 'features/prompt/PromptExpansion/state';
-import { selectAllowPromptExpansion } from 'features/system/store/configSlice';
-import { toast } from 'features/toast/toast';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiTextTBold } from 'react-icons/pi';
-
-export const ContextMenuItemUseForPromptGeneration = memo(() => {
- const { t } = useTranslation();
- const { dispatch, getState } = useAppStore();
- const imageDTO = useItemDTOContextImageOnly();
- const { isPending } = useStore(promptExpansionApi.$state);
- const isPromptExpansionEnabled = useAppSelector(selectAllowPromptExpansion);
-
- const handleUseForPromptGeneration = useCallback(() => {
- promptExpansionApi.setPending(imageDTO);
- expandPrompt({ dispatch, getState, imageDTO });
- toast({
- id: 'PROMPT_GENERATION_STARTED',
- title: t('toast.promptGenerationStarted'),
- status: 'info',
- });
- }, [dispatch, getState, imageDTO, t]);
-
- if (!isPromptExpansionEnabled) {
- return null;
- }
-
- return (
- }
- onClickCapture={handleUseForPromptGeneration}
- id="use-for-prompt-generation"
- isDisabled={isPending}
- >
- {t('gallery.useForPromptGeneration')}
-
- );
-});
-
-ContextMenuItemUseForPromptGeneration.displayName = 'ContextMenuItemUseForPromptGeneration';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx
index ee21261cb31..d148332943c 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx
@@ -1,10 +1,7 @@
import { MenuDivider, MenuItem } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { imagesToChangeSelected, isModalOpenChanged } from 'features/changeBoardModal/store/slice';
import { useDeleteImageModalApi } from 'features/deleteImageModal/store/state';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadSimpleBold, PiFoldersBold, PiStarBold, PiStarFill, PiTrashSimpleBold } from 'react-icons/pi';
@@ -18,49 +15,44 @@ const MultipleSelectionMenuItems = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selection = useAppSelector((s) => s.gallery.selection);
- const customStarUi = useStore($customStarUI);
const deleteImageModal = useDeleteImageModalApi();
- const isBulkDownloadEnabled = useFeatureStatus('bulkDownload');
-
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();
const [bulkDownload] = useBulkDownloadImagesMutation();
const handleChangeBoard = useCallback(() => {
- dispatch(imagesToChangeSelected(selection.map((s) => s.id)));
+ dispatch(imagesToChangeSelected(selection));
dispatch(isModalOpenChanged(true));
}, [dispatch, selection]);
const handleDeleteSelection = useCallback(() => {
- deleteImageModal.delete(selection.map((s) => s.id));
+ deleteImageModal.delete(selection);
}, [deleteImageModal, selection]);
const handleStarSelection = useCallback(() => {
- starImages({ image_names: selection.map((s) => s.id) });
+ starImages({ image_names: selection });
}, [starImages, selection]);
const handleUnstarSelection = useCallback(() => {
- unstarImages({ image_names: selection.map((s) => s.id) });
+ unstarImages({ image_names: selection });
}, [unstarImages, selection]);
const handleBulkDownload = useCallback(() => {
- bulkDownload({ image_names: selection.map((s) => s.id) });
+ bulkDownload({ image_names: selection });
}, [selection, bulkDownload]);
return (
<>
- } onClickCapture={handleUnstarSelection}>
- {customStarUi ? customStarUi.off.text : `Unstar All`}
+ } onClickCapture={handleUnstarSelection}>
+ Unstar All
+
+ } onClickCapture={handleStarSelection}>
+ Star All
- } onClickCapture={handleStarSelection}>
- {customStarUi ? customStarUi.on.text : `Star All`}
+ } onClickCapture={handleBulkDownload}>
+ {t('gallery.downloadSelection')}
- {isBulkDownloadEnabled && (
- } onClickCapture={handleBulkDownload}>
- {t('gallery.downloadSelection')}
-
- )}
} onClickCapture={handleChangeBoard}>
{t('boards.changeBoard')}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx
deleted file mode 100644
index 47edf37d3ff..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx
+++ /dev/null
@@ -1,58 +0,0 @@
-import { MenuDivider, MenuItem } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $customStarUI } from 'app/store/nanostores/customStarUI';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { isModalOpenChanged, videosToChangeSelected } from 'features/changeBoardModal/store/slice';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiFoldersBold, PiStarBold, PiStarFill, PiTrashSimpleBold } from 'react-icons/pi';
-import { useDeleteVideosMutation, useStarVideosMutation, useUnstarVideosMutation } from 'services/api/endpoints/videos';
-
-const MultipleSelectionMenuItems = () => {
- const { t } = useTranslation();
- const dispatch = useAppDispatch();
- const selection = useAppSelector((s) => s.gallery.selection);
- const customStarUi = useStore($customStarUI);
-
- const [starVideos] = useStarVideosMutation();
- const [unstarVideos] = useUnstarVideosMutation();
- const [deleteVideos] = useDeleteVideosMutation();
-
- const handleChangeBoard = useCallback(() => {
- dispatch(videosToChangeSelected(selection.map((s) => s.id)));
- dispatch(isModalOpenChanged(true));
- }, [dispatch, selection]);
-
- const handleDeleteSelection = useCallback(() => {
- // TODO: Add confirm on delete and video usage functionality
- deleteVideos({ video_ids: selection.map((s) => s.id) });
- }, [deleteVideos, selection]);
-
- const handleStarSelection = useCallback(() => {
- starVideos({ video_ids: selection.map((s) => s.id) });
- }, [starVideos, selection]);
-
- const handleUnstarSelection = useCallback(() => {
- unstarVideos({ video_ids: selection.map((s) => s.id) });
- }, [unstarVideos, selection]);
-
- return (
- <>
- } onClickCapture={handleUnstarSelection}>
- {customStarUi ? customStarUi.off.text : `Unstar All`}
-
- } onClickCapture={handleStarSelection}>
- {customStarUi ? customStarUi.on.text : `Star All`}
-
- } onClickCapture={handleChangeBoard}>
- {t('boards.changeBoard')}
-
-
- } onClickCapture={handleDeleteSelection}>
- {t('gallery.deleteSelection')}
-
- >
- );
-};
-
-export default memo(MultipleSelectionMenuItems);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md
new file mode 100644
index 00000000000..0c96bbfa99c
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md
@@ -0,0 +1,18 @@
+# Image context menu
+
+The context menu is loosely based on https://github.com/lukasbach/chakra-ui-contextmenu.
+
+That library creates a component for _every_ instance of a thing that needed a context menu, which caused perf issues. This implementation uses a singleton pattern instead, with a single component that listens for context menu events and opens the menu as needed.
+
+Images register themselves with the context menu by mapping their DOM element to their image DTO. When a context menu event is fired, we look up the target element in the map (or its parents) to find the image DTO to show the context menu for.
+
+## Image actions
+
+- Recalling common individual metadata fields or all metadata
+- Opening the image in the image viewer or new tab
+- Copying the image to clipboard
+- Downloading the image
+- Selecting the image for comparison
+- Deleting the image
+- Moving the image to a different board
+- "Sending" the image to other parts of the app such as canvas
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx
index 3556e1a9d48..9d64395a8c5 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx
@@ -13,13 +13,10 @@ import { ContextMenuItemOpenInNewTab } from 'features/gallery/components/Context
import { ContextMenuItemOpenInViewer } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer';
import { ContextMenuItemSelectForCompare } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSelectForCompare';
import { ContextMenuItemSendToUpscale } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToUpscale';
-import { ContextMenuItemSendToVideo } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo';
import { ContextMenuItemStarUnstar } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar';
import { ContextMenuItemUseAsPromptTemplate } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate';
import { ContextMenuItemUseAsRefImage } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage';
-import { ContextMenuItemUseForPromptGeneration } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration';
-import { ItemDTOContextProvider } from 'features/gallery/contexts/ItemDTOContext';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
+import { ImageDTOContextProvider } from 'features/gallery/contexts/ImageDTOContext';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { ImageDTO } from 'services/api/types';
@@ -32,10 +29,9 @@ type SingleSelectionMenuItemsProps = {
const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) => {
const tab = useAppSelector(selectActiveTab);
- const isVideoEnabled = useFeatureStatus('video');
return (
-
+
@@ -50,8 +46,6 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
{tab === 'upscaling' && }
- {isVideoEnabled && }
-
{(tab === 'canvas' || tab === 'generate') && }
@@ -64,7 +58,7 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
// Only render this button on tabs with a gallery.
)}
-
+
);
};
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx
deleted file mode 100644
index d91fe886560..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-import { MenuDivider } from '@invoke-ai/ui-library';
-import { IconMenuItemGroup } from 'common/components/IconMenuItem';
-import { ContextMenuItemChangeBoard } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard';
-import { ContextMenuItemDownload } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload';
-import { ContextMenuItemOpenInNewTab } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab';
-import { ContextMenuItemOpenInViewer } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer';
-import { ItemDTOContextProvider } from 'features/gallery/contexts/ItemDTOContext';
-import type { VideoDTO } from 'services/api/types';
-
-import { ContextMenuItemDeleteVideo } from './MenuItems/ContextMenuItemDeleteVideo';
-import { ContextMenuItemStarUnstar } from './MenuItems/ContextMenuItemStarUnstar';
-
-type SingleSelectionVideoMenuItemsProps = {
- videoDTO: VideoDTO;
-};
-
-const SingleSelectionVideoMenuItems = ({ videoDTO }: SingleSelectionVideoMenuItemsProps) => {
- return (
-
-
-
-
-
-
-
-
-
-
-
- );
-};
-
-export default SingleSelectionVideoMenuItems;
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx
deleted file mode 100644
index 533a3d38b9a..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx
+++ /dev/null
@@ -1,279 +0,0 @@
-import type { ChakraProps } from '@invoke-ai/ui-library';
-import { Menu, MenuButton, MenuList, Portal, useGlobalMenuClose } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { useAppSelector } from 'app/store/storeHooks';
-import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
-import MultipleSelectionVideoMenuItems from 'features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems';
-import SingleSelectionVideoMenuItems from 'features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems';
-import { selectSelectionCount } from 'features/gallery/store/gallerySelectors';
-import { map } from 'nanostores';
-import type { RefObject } from 'react';
-import { memo, useCallback, useEffect, useRef } from 'react';
-import type { VideoDTO } from 'services/api/types';
-
-/**
- * The delay in milliseconds before the context menu opens on long press.
- */
-const LONGPRESS_DELAY_MS = 500;
-/**
- * The threshold in pixels that the pointer must move before the long press is cancelled.
- */
-const LONGPRESS_MOVE_THRESHOLD_PX = 10;
-
-/**
- * The singleton state of the context menu.
- */
-const $videoContextMenuState = map<{
- isOpen: boolean;
- videoDTO: VideoDTO | null;
- position: { x: number; y: number };
-}>({
- isOpen: false,
- videoDTO: null,
- position: { x: -1, y: -1 },
-});
-
-/**
- * Convenience function to close the context menu.
- */
-const onClose = () => {
- $videoContextMenuState.setKey('isOpen', false);
-};
-
-/**
- * Map of elements to image DTOs. This is used to determine which image DTO to show the context menu for, depending on
- * the target of the context menu or long press event.
- */
-const elToVideoMap = new Map();
-
-/**
- * Given a target node, find the first registered parent element that contains the target node and return the imageDTO
- * associated with it.
- */
-const getVideoDTOFromMap = (target: Node): VideoDTO | undefined => {
- const entry = Array.from(elToVideoMap.entries()).find((entry) => entry[0].contains(target));
- return entry?.[1];
-};
-
-/**
- * Register a context menu for an image DTO on a target element.
- * @param imageDTO The image DTO to register the context menu for.
- * @param targetRef The ref of the target element that should trigger the context menu.
- */
-export const useVideoContextMenu = (videoDTO: VideoDTO, ref: RefObject | (HTMLElement | null)) => {
- useEffect(() => {
- if (ref === null) {
- return;
- }
- const el = ref instanceof HTMLElement ? ref : ref.current;
- if (!el) {
- return;
- }
- elToVideoMap.set(el, videoDTO);
- return () => {
- elToVideoMap.delete(el);
- };
- }, [videoDTO, ref]);
-};
-
-/**
- * Singleton component that renders the context menu for images.
- */
-export const VideoContextMenu = memo(() => {
- useAssertSingleton('VideoContextMenu');
- const state = useStore($videoContextMenuState);
- useGlobalMenuClose(onClose);
-
- return (
-
-
-
-
- );
-});
-
-VideoContextMenu.displayName = 'VideoContextMenu';
-
-const _hover: ChakraProps['_hover'] = { bg: 'transparent' };
-
-/**
- * A logical component that listens for context menu events and opens the context menu. It's separate from
- * ImageContextMenu component to avoid re-rendering the whole context menu on every context menu event.
- */
-const VideoContextMenuEventLogical = memo(() => {
- const lastPositionRef = useRef<{ x: number; y: number }>({ x: -1, y: -1 });
- const longPressTimeoutRef = useRef(0);
- const animationTimeoutRef = useRef(0);
-
- const onContextMenu = useCallback((e: MouseEvent | PointerEvent) => {
- if (e.shiftKey) {
- // This is a shift + right click event, which should open the native context menu
- onClose();
- return;
- }
-
- const videoDTO = getVideoDTOFromMap(e.target as Node);
-
- if (!videoDTO) {
- // Can't find the image DTO, close the context menu
- onClose();
- return;
- }
-
- // clear pending delayed open
- window.clearTimeout(animationTimeoutRef.current);
- e.preventDefault();
-
- if (lastPositionRef.current.x !== e.pageX || lastPositionRef.current.y !== e.pageY) {
- // if the mouse moved, we need to close, wait for animation and reopen the menu at the new position
- if ($videoContextMenuState.get().isOpen) {
- onClose();
- }
- animationTimeoutRef.current = window.setTimeout(() => {
- // Open the menu after the animation with the new state
- $videoContextMenuState.set({
- isOpen: true,
- position: { x: e.pageX, y: e.pageY },
- videoDTO,
- });
- }, 100);
- } else {
- // else we can just open the menu at the current position w/ new state
- $videoContextMenuState.set({
- isOpen: true,
- position: { x: e.pageX, y: e.pageY },
- videoDTO,
- });
- }
-
- // Always sync the last position
- lastPositionRef.current = { x: e.pageX, y: e.pageY };
- }, []);
-
- // Use a long press to open the context menu on touch devices
- const onPointerDown = useCallback(
- (e: PointerEvent) => {
- if (e.pointerType === 'mouse') {
- // Bail out if it's a mouse event - this is for touch/pen only
- return;
- }
-
- longPressTimeoutRef.current = window.setTimeout(() => {
- onContextMenu(e);
- }, LONGPRESS_DELAY_MS);
-
- lastPositionRef.current = { x: e.pageX, y: e.pageY };
- },
- [onContextMenu]
- );
-
- const onPointerMove = useCallback((e: PointerEvent) => {
- if (e.pointerType === 'mouse') {
- // Bail out if it's a mouse event - this is for touch/pen only
- return;
- }
- if (longPressTimeoutRef.current === null) {
- return;
- }
-
- // If the pointer has moved more than the threshold, cancel the long press
- const lastPosition = lastPositionRef.current;
-
- const distanceFromLastPosition = Math.hypot(e.pageX - lastPosition.x, e.pageY - lastPosition.y);
-
- if (distanceFromLastPosition > LONGPRESS_MOVE_THRESHOLD_PX) {
- clearTimeout(longPressTimeoutRef.current);
- }
- }, []);
-
- const onPointerUp = useCallback((e: PointerEvent) => {
- if (e.pointerType === 'mouse') {
- // Bail out if it's a mouse event - this is for touch/pen only
- return;
- }
- if (longPressTimeoutRef.current) {
- clearTimeout(longPressTimeoutRef.current);
- }
- }, []);
-
- const onPointerCancel = useCallback((e: PointerEvent) => {
- if (e.pointerType === 'mouse') {
- // Bail out if it's a mouse event - this is for touch/pen only
- return;
- }
- if (longPressTimeoutRef.current) {
- clearTimeout(longPressTimeoutRef.current);
- }
- }, []);
-
- useEffect(() => {
- const controller = new AbortController();
-
- // Context menu events
- window.addEventListener('contextmenu', onContextMenu, { signal: controller.signal });
-
- // Long press events
- window.addEventListener('pointerdown', onPointerDown, { signal: controller.signal });
- window.addEventListener('pointerup', onPointerUp, { signal: controller.signal });
- window.addEventListener('pointercancel', onPointerCancel, { signal: controller.signal });
- window.addEventListener('pointermove', onPointerMove, { signal: controller.signal });
-
- return () => {
- controller.abort();
- };
- }, [onContextMenu, onPointerCancel, onPointerDown, onPointerMove, onPointerUp]);
-
- useEffect(
- () => () => {
- // Clean up any timeouts when we unmount
- window.clearTimeout(animationTimeoutRef.current);
- window.clearTimeout(longPressTimeoutRef.current);
- },
- []
- );
-
- return null;
-});
-
-VideoContextMenuEventLogical.displayName = 'VideoContextMenuEventLogical';
-
-// The content of the context menu, which changes based on the selection count. Split out and memoized to avoid
-// re-rendering the whole context menu too often.
-const MenuContent = memo(() => {
- const selectionCount = useAppSelector(selectSelectionCount);
- const state = useStore($videoContextMenuState);
-
- if (!state.videoDTO) {
- return null;
- }
-
- if (selectionCount > 1) {
- return (
-
-
-
- );
- }
-
- return (
-
-
-
- );
-});
-
-MenuContent.displayName = 'MenuContent';
diff --git a/invokeai/frontend/web/src/features/gallery/components/GalleryHeader.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryHeader.tsx
deleted file mode 100644
index 1bcf7eb68ce..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/GalleryHeader.tsx
+++ /dev/null
@@ -1,21 +0,0 @@
-import { Link } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $projectName, $projectUrl } from 'app/store/nanostores/projectId';
-import { memo } from 'react';
-
-export const GalleryHeader = memo(() => {
- const projectName = useStore($projectName);
- const projectUrl = useStore($projectUrl);
-
- if (projectName && projectUrl) {
- return (
-
- {projectName}
-
- );
- }
-
- return null;
-});
-
-GalleryHeader.displayName = 'GalleryHeader';
diff --git a/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx
similarity index 97%
rename from invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx
rename to invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx
index 026319392c8..f2c50f786ec 100644
--- a/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx
@@ -42,6 +42,9 @@ type GridContext = {
imageNames: string[];
};
+/**
+ * Wraps an image - either the placeholder as it is being loaded or the loaded image
+ */
const ImageAtPosition = memo(({ imageName }: { index: number; imageName: string }) => {
/*
* We rely on the useRangeBasedImageFetching to fetch all image DTOs, caching them with RTK Query.
@@ -126,8 +129,8 @@ const useKeyboardNavigation = (
const imageName = event.altKey
? // When the user holds alt, we are changing the image to compare - if no image to compare is currently selected,
// we start from the last selected image
- (selectImageToCompare(state) ?? selectLastSelectedItem(state)?.id)
- : selectLastSelectedItem(state)?.id;
+ (selectImageToCompare(state) ?? selectLastSelectedItem(state))
+ : selectLastSelectedItem(state);
const currentIndex = getItemIndex(imageName ?? null, imageNames);
@@ -174,7 +177,7 @@ const useKeyboardNavigation = (
if (event.altKey) {
dispatch(imageToCompareChanged(newImageName));
} else {
- dispatch(selectionChanged([{ type: 'image', id: newImageName }]));
+ dispatch(selectionChanged([newImageName]));
}
}
}
@@ -261,7 +264,7 @@ const useKeepSelectedImageInView = (
const selection = useAppSelector(selectSelection);
useEffect(() => {
- const targetImageName = selection.at(-1)?.id;
+ const targetImageName = selection.at(-1);
const virtuosoGridHandle = virtuosoRef.current;
const rootEl = rootRef.current;
const range = rangeRef.current;
@@ -280,7 +283,7 @@ const useStarImageHotkey = () => {
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
const selectionCount = useAppSelector(selectSelectionCount);
const isGalleryFocused = useIsRegionFocused('gallery');
- const imageDTO = useImageDTO(lastSelectedItem?.id);
+ const imageDTO = useImageDTO(lastSelectedItem);
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();
@@ -307,7 +310,7 @@ const useStarImageHotkey = () => {
});
};
-export const ImageGallery = memo(() => {
+export const GalleryImageGrid = memo(() => {
const virtuosoRef = useRef(null);
const rangeRef = useRef({ startIndex: 0, endIndex: 0 });
const rootRef = useRef(null);
@@ -378,7 +381,7 @@ export const ImageGallery = memo(() => {
);
});
-ImageGallery.displayName = 'NewGallery';
+GalleryImageGrid.displayName = 'GalleryImageGrid';
const scrollSeekConfiguration: ScrollSeekConfiguration = {
enter: (velocity) => {
diff --git a/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx
similarity index 86%
rename from invokeai/frontend/web/src/features/gallery/components/Gallery.tsx
rename to invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx
index 11291bd5c72..874561e2048 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx
@@ -6,7 +6,6 @@ import { useDisclosure } from 'common/hooks/useBoolean';
import { useGallerySearchTerm } from 'features/gallery/components/ImageGrid/useGallerySearchTerm';
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { galleryViewChanged, selectGallerySlice } from 'features/gallery/store/gallerySlice';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useAutoLayoutContext } from 'features/ui/layouts/auto-layout-context';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import type { CSSProperties } from 'react';
@@ -15,11 +14,10 @@ import { useTranslation } from 'react-i18next';
import { PiCaretDownBold, PiCaretUpBold, PiMagnifyingGlassBold } from 'react-icons/pi';
import { useBoardName } from 'services/api/hooks/useBoardName';
+import { GalleryImageGrid } from './GalleryImageGrid';
import { GallerySettingsPopover } from './GallerySettingsPopover/GallerySettingsPopover';
import { GalleryUploadButton } from './GalleryUploadButton';
import { GallerySearch } from './ImageGrid/GallerySearch';
-import { ImageGallery } from './NewGallery';
-import { VideoGallery } from './VideoGallery';
const COLLAPSE_STYLES: CSSProperties = { flexShrink: 0, minHeight: 0, width: '100%' };
@@ -44,10 +42,6 @@ export const GalleryPanel = memo(() => {
dispatch(galleryViewChanged('assets'));
}, [dispatch]);
- const handleClickVideos = useCallback(() => {
- dispatch(galleryViewChanged('videos'));
- }, [dispatch]);
-
const handleClickSearch = useCallback(() => {
onResetSearchTerm();
if (!searchDisclosure.isOpen && galleryPanel.$isCollapsed.get()) {
@@ -58,7 +52,6 @@ export const GalleryPanel = memo(() => {
const selectedBoardId = useAppSelector(selectSelectedBoardId);
const boardName = useBoardName(selectedBoardId);
- const isVideoEnabled = useFeatureStatus('video');
return (
@@ -83,16 +76,6 @@ export const GalleryPanel = memo(() => {
{t('parameters.images')}
- {isVideoEnabled && (
-
- )}
- {galleryView === 'videos' ? : }
+
);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
index 5a794530b0a..ccd58992ef6 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
@@ -46,7 +46,7 @@ const buildOnClick =
if (imageNames.length === 0) {
// For basic click without modifiers, we can still set selection
if (!shiftKey && !ctrlKey && !metaKey && !altKey) {
- dispatch(selectionChanged([{ type: 'image', id: imageName }]));
+ dispatch(selectionChanged([imageName]));
}
return;
}
@@ -61,7 +61,7 @@ const buildOnClick =
}
} else if (shiftKey) {
const rangeEndImageName = imageName;
- const lastSelectedImage = selection.at(-1)?.id;
+ const lastSelectedImage = selection.at(-1);
const lastClickedIndex = imageNames.findIndex((name) => name === lastSelectedImage);
const currentClickedIndex = imageNames.findIndex((name) => name === rangeEndImageName);
if (lastClickedIndex > -1 && currentClickedIndex > -1) {
@@ -72,16 +72,16 @@ const buildOnClick =
if (currentClickedIndex < lastClickedIndex) {
imagesToSelect.reverse();
}
- dispatch(selectionChanged(uniq(selection.concat(imagesToSelect.map((name) => ({ type: 'image', id: name }))))));
+ dispatch(selectionChanged(uniq(selection.concat(imagesToSelect))));
}
} else if (ctrlKey || metaKey) {
- if (selection.some((n) => n.id === imageName) && selection.length > 1) {
- dispatch(selectionChanged(uniq(selection.filter((n) => n.id !== imageName))));
+ if (selection.some((n) => n === imageName) && selection.length > 1) {
+ dispatch(selectionChanged(uniq(selection.filter((n) => n !== imageName))));
} else {
- dispatch(selectionChanged(uniq(selection.concat({ type: 'image', id: imageName }))));
+ dispatch(selectionChanged(uniq(selection.concat(imageName))));
}
} else {
- dispatch(selectionChanged([{ type: 'image', id: imageName }]));
+ dispatch(selectionChanged([imageName]));
}
};
@@ -98,7 +98,7 @@ export const GalleryImage = memo(({ imageDTO }: Props) => {
);
const isSelectedForCompare = useAppSelector(selectIsSelectedForCompare);
const selectIsSelected = useMemo(
- () => createSelector(selectGallerySlice, (gallery) => gallery.selection.some((s) => s.id === imageDTO.image_name)),
+ () => createSelector(selectGallerySlice, (gallery) => gallery.selection.some((n) => n === imageDTO.image_name)),
[imageDTO.image_name]
);
const isSelected = useAppSelector(selectIsSelected);
@@ -118,9 +118,9 @@ export const GalleryImage = memo(({ imageDTO }: Props) => {
// When we have multiple images selected, and the dragged image is part of the selection, initiate a
// multi-image drag.
- if (selection.length > 1 && selection.some((s) => s.id === imageDTO.image_name)) {
+ if (selection.length > 1 && selection.some((n) => n === imageDTO.image_name)) {
return multipleImageDndSource.getData({
- image_names: selection.map((s) => s.id),
+ image_names: selection,
board_id: boardId,
});
}
@@ -217,7 +217,7 @@ export const GalleryImage = memo(({ imageDTO }: Props) => {
maxH="full"
borderRadius="base"
/>
-
+
{dragPreviewState?.type === 'multiple-image' ? createMultipleImageDragPreview(dragPreviewState) : null}
{dragPreviewState?.type === 'single-image' ? createSingleImageDragPreview(dragPreviewState) : null}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx
index 2b5185b11f6..0a97bf819de 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx
@@ -5,33 +5,23 @@ import type { MouseEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';
-import { useDeleteVideosMutation } from 'services/api/endpoints/videos';
-import { type ImageDTO, isImageDTO, type VideoDTO } from 'services/api/types';
+import type { ImageDTO } from 'services/api/types';
type Props = {
- itemDTO: ImageDTO | VideoDTO;
+ imageDTO: ImageDTO;
};
-export const GalleryItemDeleteIconButton = memo(({ itemDTO }: Props) => {
+export const GalleryItemDeleteIconButton = memo(({ imageDTO }: Props) => {
const shift = useShiftModifier();
const { t } = useTranslation();
const deleteImageModal = useDeleteImageModalApi();
- const [deleteVideos] = useDeleteVideosMutation();
const onClick = useCallback(
(e: MouseEvent) => {
e.stopPropagation();
- if (!itemDTO) {
- return;
- }
- if (isImageDTO(itemDTO)) {
- deleteImageModal.delete([itemDTO.image_name]);
- } else {
- // TODO: Add confirm on delete and video usage functionality
- deleteVideos({ video_ids: [itemDTO.video_id] });
- }
+ deleteImageModal.delete([imageDTO.image_name]);
},
- [deleteImageModal, deleteVideos, itemDTO]
+ [deleteImageModal, imageDTO]
);
if (!shift) {
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemHoverIcons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemHoverIcons.tsx
index 03e69780dc4..e453b112e14 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemHoverIcons.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemHoverIcons.tsx
@@ -5,22 +5,22 @@ import { GalleryItemSizeBadge } from 'features/gallery/components/ImageGrid/Gall
import { GalleryItemStarIconButton } from 'features/gallery/components/ImageGrid/GalleryItemStarIconButton';
import { selectAlwaysShouldImageSizeBadge } from 'features/gallery/store/gallerySelectors';
import { memo } from 'react';
-import type { ImageDTO, VideoDTO } from 'services/api/types';
+import type { ImageDTO } from 'services/api/types';
type Props = {
- itemDTO: ImageDTO | VideoDTO;
+ imageDTO: ImageDTO;
isHovered: boolean;
};
-export const GalleryItemHoverIcons = memo(({ itemDTO, isHovered }: Props) => {
+export const GalleryItemHoverIcons = memo(({ imageDTO, isHovered }: Props) => {
const alwaysShowImageSizeBadge = useAppSelector(selectAlwaysShouldImageSizeBadge);
return (
<>
- {(isHovered || alwaysShowImageSizeBadge) && }
- {(isHovered || itemDTO.starred) && }
- {isHovered && }
- {isHovered && }
+ {(isHovered || alwaysShowImageSizeBadge) && }
+ {(isHovered || imageDTO.starred) && }
+ {isHovered && }
+ {isHovered && }
>
);
});
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx
index 14a2f6f0874..21641d69fcd 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx
@@ -1,31 +1,26 @@
import { useAppDispatch } from 'app/store/storeHooks';
import { DndImageIcon } from 'features/dnd/DndImageIcon';
-import { imageToCompareChanged, itemSelected } from 'features/gallery/store/gallerySlice';
+import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { VIEWER_PANEL_ID } from 'features/ui/layouts/shared';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsOutBold } from 'react-icons/pi';
-import { type ImageDTO, isImageDTO, type VideoDTO } from 'services/api/types';
+import type { ImageDTO } from 'services/api/types';
type Props = {
- itemDTO: ImageDTO | VideoDTO;
+ imageDTO: ImageDTO;
};
-export const GalleryItemOpenInViewerIconButton = memo(({ itemDTO }: Props) => {
+export const GalleryItemOpenInViewerIconButton = memo(({ imageDTO }: Props) => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const onClick = useCallback(() => {
- if (isImageDTO(itemDTO)) {
- dispatch(imageToCompareChanged(null));
- dispatch(itemSelected({ type: 'image', id: itemDTO.image_name }));
- } else {
- // dispatch(videoToCompareChanged(null));
- // dispatch(videoSelected(itemDTO.video_id));
- }
+ dispatch(imageToCompareChanged(null));
+ dispatch(imageSelected(imageDTO.image_name));
navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID);
- }, [dispatch, itemDTO]);
+ }, [dispatch, imageDTO]);
return (
{
+export const GalleryItemSizeBadge = memo(({ imageDTO }: Props) => {
return (
{
lineHeight={1.25}
borderTopEndRadius="base"
pointerEvents="none"
- >{`${itemDTO.width}x${itemDTO.height}`}
+ >{`${imageDTO.width}x${imageDTO.height}`}
);
});
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemStarIconButton.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemStarIconButton.tsx
index 798eba6834e..71607ff3ecb 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemStarIconButton.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemStarIconButton.tsx
@@ -1,57 +1,30 @@
-import { useStore } from '@nanostores/react';
-import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { DndImageIcon } from 'features/dnd/DndImageIcon';
import { memo, useCallback } from 'react';
import { PiStarBold, PiStarFill } from 'react-icons/pi';
import { useStarImagesMutation, useUnstarImagesMutation } from 'services/api/endpoints/images';
-import { useStarVideosMutation, useUnstarVideosMutation } from 'services/api/endpoints/videos';
-import { type ImageDTO, isImageDTO, type VideoDTO } from 'services/api/types';
+import type { ImageDTO } from 'services/api/types';
type Props = {
- itemDTO: ImageDTO | VideoDTO;
+ imageDTO: ImageDTO;
};
-export const GalleryItemStarIconButton = memo(({ itemDTO }: Props) => {
- const customStarUi = useStore($customStarUI);
+export const GalleryItemStarIconButton = memo(({ imageDTO }: Props) => {
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();
- const [starVideos] = useStarVideosMutation();
- const [unstarVideos] = useUnstarVideosMutation();
const toggleStarredState = useCallback(() => {
- if (itemDTO.starred) {
- if (isImageDTO(itemDTO)) {
- unstarImages({ image_names: [itemDTO.image_name] });
- } else {
- unstarVideos({ video_ids: [itemDTO.video_id] });
- }
+ if (imageDTO.starred) {
+ unstarImages({ image_names: [imageDTO.image_name] });
} else {
- if (isImageDTO(itemDTO)) {
- starImages({ image_names: [itemDTO.image_name] });
- } else {
- starVideos({ video_ids: [itemDTO.video_id] });
- }
+ starImages({ image_names: [imageDTO.image_name] });
}
- }, [starImages, unstarImages, starVideos, unstarVideos, itemDTO]);
-
- if (customStarUi) {
- return (
-
- );
- }
+ }, [starImages, unstarImages, imageDTO]);
return (
: }
- tooltip={itemDTO.starred ? 'Unstar' : 'Star'}
+ icon={imageDTO.starred ? : }
+ tooltip={imageDTO.starred ? 'Unstar' : 'Star'}
position="absolute"
top={2}
insetInlineEnd={2}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx
index 0939e168e51..28c1c689396 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx
@@ -15,8 +15,7 @@ export const GallerySelectionCountTag = memo(() => {
const isGalleryFocused = useIsRegionFocused('gallery');
const onSelectPage = useCallback(() => {
- const selection = imageNames.map((name) => ({ type: 'image' as const, id: name }));
- dispatch(selectionChanged(selection));
+ dispatch(selectionChanged(imageNames));
}, [dispatch, imageNames]);
useRegisteredHotkeys({
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideo.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideo.tsx
deleted file mode 100644
index fe249b6019e..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideo.tsx
+++ /dev/null
@@ -1,218 +0,0 @@
-import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine';
-import { draggable, monitorForElements } from '@atlaskit/pragmatic-drag-and-drop/element/adapter';
-import { Flex, Image } from '@invoke-ai/ui-library';
-import { createSelector } from '@reduxjs/toolkit';
-import type { AppDispatch, AppGetState } from 'app/store/store';
-import { useAppSelector, useAppStore } from 'app/store/storeHooks';
-import { uniq } from 'es-toolkit';
-import { multipleVideoDndSource, singleVideoDndSource } from 'features/dnd/dnd';
-import type { DndDragPreviewMultipleVideoState } from 'features/dnd/DndDragPreviewMultipleVideo';
-import { createMultipleVideoDragPreview, setMultipleVideoDragPreview } from 'features/dnd/DndDragPreviewMultipleVideo';
-import type { DndDragPreviewSingleVideoState } from 'features/dnd/DndDragPreviewSingleVideo';
-import { createSingleVideoDragPreview, setSingleVideoDragPreview } from 'features/dnd/DndDragPreviewSingleVideo';
-import { firefoxDndFix } from 'features/dnd/util';
-import { useVideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu';
-import {
- selectGetVideoIdsQueryArgs,
- selectSelectedBoardId,
- selectSelection,
-} from 'features/gallery/store/gallerySelectors';
-import { imageToCompareChanged, selectGallerySlice, selectionChanged } from 'features/gallery/store/gallerySlice';
-import { navigationApi } from 'features/ui/layouts/navigation-api';
-import { VIEWER_PANEL_ID } from 'features/ui/layouts/shared';
-import type { MouseEvent, MouseEventHandler } from 'react';
-import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react';
-import { videosApi } from 'services/api/endpoints/videos';
-import type { VideoDTO } from 'services/api/types';
-
-import { galleryItemContainerSX } from './galleryItemContainerSX';
-import { GalleryItemHoverIcons } from './GalleryItemHoverIcons';
-import { GalleryVideoPlaceholder } from './GalleryVideoPlaceholder';
-
-interface Props {
- videoDTO: VideoDTO;
-}
-
-const buildOnClick =
- (videoId: string, dispatch: AppDispatch, getState: AppGetState) => (e: MouseEvent) => {
- const { shiftKey, ctrlKey, metaKey, altKey } = e;
- const state = getState();
- const queryArgs = selectGetVideoIdsQueryArgs(state);
- const videoIds = videosApi.endpoints.getVideoIds.select(queryArgs)(state).data?.video_ids ?? [];
-
- // If we don't have the video ids cached, we can't perform selection operations
- // This can happen if the user clicks on a video before the ids are loaded
- if (videoIds.length === 0) {
- // For basic click without modifiers, we can still set selection
- if (!shiftKey && !ctrlKey && !metaKey && !altKey) {
- dispatch(selectionChanged([{ type: 'video', id: videoId }]));
- }
- return;
- }
-
- const selection = state.gallery.selection;
-
- if (altKey) {
- if (state.gallery.imageToCompare === videoId) {
- dispatch(imageToCompareChanged(null));
- } else {
- dispatch(imageToCompareChanged(videoId));
- }
- } else if (shiftKey) {
- const rangeEndVideoId = videoId;
- const lastSelectedVideo = selection.at(-1)?.id;
- const lastClickedIndex = videoIds.findIndex((id) => id === lastSelectedVideo);
- const currentClickedIndex = videoIds.findIndex((id) => id === rangeEndVideoId);
- if (lastClickedIndex > -1 && currentClickedIndex > -1) {
- // We have a valid range!
- const start = Math.min(lastClickedIndex, currentClickedIndex);
- const end = Math.max(lastClickedIndex, currentClickedIndex);
- const videosToSelect = videoIds.slice(start, end + 1);
- dispatch(selectionChanged(uniq(selection.concat(videosToSelect.map((id) => ({ type: 'video', id }))))));
- }
- } else if (ctrlKey || metaKey) {
- if (selection.some((n) => n.id === videoId) && selection.length > 1) {
- dispatch(selectionChanged(uniq(selection.filter((n) => n.id !== videoId))));
- } else {
- dispatch(selectionChanged(uniq(selection.concat({ type: 'video', id: videoId }))));
- }
- } else {
- dispatch(selectionChanged([{ type: 'video', id: videoId }]));
- }
- };
-
-export const GalleryVideo = memo(({ videoDTO }: Props) => {
- const store = useAppStore();
- const [isDragging, setIsDragging] = useState(false);
- const [dragPreviewState, setDragPreviewState] = useState<
- DndDragPreviewSingleVideoState | DndDragPreviewMultipleVideoState | null
- >(null);
- const ref = useRef(null);
- const selectIsSelected = useMemo(
- () => createSelector(selectGallerySlice, (gallery) => gallery.selection.some((s) => s.id === videoDTO.video_id)),
- [videoDTO.video_id]
- );
- const isSelected = useAppSelector(selectIsSelected);
-
- useEffect(() => {
- const element = ref.current;
- if (!element) {
- return;
- }
- return combine(
- firefoxDndFix(element),
- draggable({
- element,
- getInitialData: () => {
- const selection = selectSelection(store.getState());
- const boardId = selectSelectedBoardId(store.getState());
-
- // When we have multiple images selected, and the dragged image is part of the selection, initiate a
- // multi-image drag.
- if (selection.length > 1 && selection.some((s) => s.id === videoDTO.video_id)) {
- return multipleVideoDndSource.getData({
- video_ids: selection.map((s) => s.id),
- board_id: boardId,
- });
- } // Otherwise, initiate a single-image drag
-
- return singleVideoDndSource.getData({ videoDTO }, videoDTO.video_id);
- },
- // This is a "local" drag start event, meaning that it is only called when this specific image is dragged.
- onDragStart: ({ source }) => {
- // When we start dragging a single image, set the dragging state to true. This is only called when this
- // specific image is dragged.
- if (singleVideoDndSource.typeGuard(source.data)) {
- setIsDragging(true);
- return;
- }
- },
- onGenerateDragPreview: (args) => {
- if (multipleVideoDndSource.typeGuard(args.source.data)) {
- setMultipleVideoDragPreview({
- multipleVideoDndData: args.source.data,
- onGenerateDragPreviewArgs: args,
- setDragPreviewState,
- });
- } else if (singleVideoDndSource.typeGuard(args.source.data)) {
- setSingleVideoDragPreview({
- singleVideoDndData: args.source.data,
- onGenerateDragPreviewArgs: args,
- setDragPreviewState,
- });
- }
- },
- }),
- monitorForElements({
- // This is a "global" drag start event, meaning that it is called for all drag events.
- onDragStart: ({ source }) => {
- // When we start dragging multiple images, set the dragging state to true if the dragged image is part of the
- // selection. This is called for all drag events.
- if (
- multipleVideoDndSource.typeGuard(source.data) &&
- source.data.payload.video_ids.includes(videoDTO.video_id)
- ) {
- setIsDragging(true);
- }
- },
- onDrop: () => {
- // Always set the dragging state to false when a drop event occurs.
- setIsDragging(false);
- },
- })
- );
- }, [videoDTO, store]);
-
- const [isHovered, setIsHovered] = useState(false);
-
- const onMouseOver = useCallback(() => {
- setIsHovered(true);
- }, []);
-
- const onMouseOut = useCallback(() => {
- setIsHovered(false);
- }, []);
-
- const onClick = useMemo(() => buildOnClick(videoDTO.video_id, store.dispatch, store.getState), [videoDTO, store]);
-
- const onDoubleClick = useCallback>(() => {
- store.dispatch(imageToCompareChanged(null));
- navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID);
- }, [store]);
-
- useVideoContextMenu(videoDTO, ref);
-
- return (
- <>
-
- }
- objectFit="contain"
- maxW="full"
- maxH="full"
- borderRadius="base"
- />
-
-
- {dragPreviewState?.type === 'multiple-video' ? createMultipleVideoDragPreview(dragPreviewState) : null}
- {dragPreviewState?.type === 'single-video' ? createSingleVideoDragPreview(dragPreviewState) : null}
- >
- );
-});
-
-GalleryVideo.displayName = 'GalleryVideo';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideoPlaceholder.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideoPlaceholder.tsx
deleted file mode 100644
index cca0f0aa51a..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideoPlaceholder.tsx
+++ /dev/null
@@ -1,11 +0,0 @@
-import { Flex, type FlexProps, Icon } from '@invoke-ai/ui-library';
-import { memo } from 'react';
-import { PiVideoBold } from 'react-icons/pi';
-
-export const GalleryVideoPlaceholder = memo((props: FlexProps) => (
-
-
-
-));
-
-GalleryVideoPlaceholder.displayName = 'GalleryVideoPlaceholder';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
index 482fadf111c..d66576aedb4 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
@@ -13,7 +13,6 @@ import {
useCollectionMetadataDatum,
useSingleMetadataDatum,
useUnrecallableMetadataDatum,
- VideoMetadataHandlers,
} from 'features/metadata/parsing';
import { memo, useCallback } from 'react';
import { PiArrowBendUpLeftBold } from 'react-icons/pi';
@@ -64,28 +63,6 @@ export const ImageMetadataActions = memo((props: Props) => {
ImageMetadataActions.displayName = 'ImageMetadataActions';
-export const VideoMetadataActions = memo((props: Props) => {
- const { metadata } = props;
-
- if (!metadata || Object.keys(metadata).length === 0) {
- return null;
- }
-
- return (
-
-
-
-
-
-
-
-
-
- );
-});
-
-VideoMetadataActions.displayName = 'VideoMetadataActions';
-
export const UnrecallableMetadataDatum = typedMemo(
({ metadata, handler }: { metadata: unknown; handler: UnrecallableMetadataHandler }) => {
const { data } = useUnrecallableMetadataDatum(metadata, handler);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/VideoMetadataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/VideoMetadataViewer.tsx
deleted file mode 100644
index f46ec8d470d..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/VideoMetadataViewer.tsx
+++ /dev/null
@@ -1,80 +0,0 @@
-import { ExternalLink, Flex, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library';
-import { IAINoContentFallback, IAINoContentFallbackWithSpinner } from 'common/components/IAIImageFallback';
-import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
-import { ImageMetadataHandlers } from 'features/metadata/parsing';
-import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { useDebouncedVideoMetadata } from 'services/api/hooks/useDebouncedMetadata';
-import type { VideoDTO } from 'services/api/types';
-
-import DataViewer from './DataViewer';
-import { UnrecallableMetadataDatum, VideoMetadataActions } from './ImageMetadataActions';
-
-type VideoMetadataViewerProps = {
- video: VideoDTO;
-};
-
-const VideoMetadataViewer = ({ video }: VideoMetadataViewerProps) => {
- const { t } = useTranslation();
-
- const { metadata, isLoading } = useDebouncedVideoMetadata(video.video_id);
-
- return (
-
-
-
-
-
-
- {t('metadata.recallParameters')}
- {t('metadata.metadata')}
- {t('metadata.videoDetails')}
-
-
-
-
- {isLoading && }
- {metadata && !isLoading && (
-
-
-
- )}
- {!metadata && !isLoading && }
-
-
- {metadata ? (
-
- ) : (
-
- )}
-
-
- {video ? (
-
- ) : (
-
- )}
-
- {/*
-
-
-
-
- */}
-
-
-
- );
-};
-
-export default memo(VideoMetadataViewer);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx
index 7994b231d69..bd9dc31a570 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx
@@ -13,7 +13,6 @@ import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { PostProcessingPopover } from 'features/parameters/components/PostProcessing/PostProcessingPopover';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
@@ -52,7 +51,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
boardIdSelected({
boardId: imageDTO.board_id ?? 'none',
select: {
- selection: [{ type: 'image', id: imageDTO.image_name }],
+ selection: [imageDTO.image_name],
galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 'images' : 'assets',
},
})
@@ -64,8 +63,6 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
const isCanvasOrGenerateOrUpscalingTab = tab === 'canvas' || tab === 'generate' || tab === 'upscaling';
const doesTabHaveGallery = tab === 'canvas' || tab === 'generate' || tab === 'workflows' || tab === 'upscaling';
- const isUpscalingEnabled = useFeatureStatus('upscaling');
-
const recallAll = useRecallAll(imageDTO);
const recallRemix = useRecallRemix(imageDTO);
const recallPrompts = useRecallPrompts(imageDTO);
@@ -182,7 +179,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
/>
)}
- {isUpscalingEnabled && }
+
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoButtons.tsx
deleted file mode 100644
index 28e91a1e5f0..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoButtons.tsx
+++ /dev/null
@@ -1,116 +0,0 @@
-import { Button, Divider, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { useDeleteVideo } from 'features/deleteImageModal/hooks/use-delete-video';
-import { DeleteVideoButton } from 'features/deleteVideoModal/components/DeleteVideoButton';
-import SingleSelectionVideoMenuItems from 'features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems';
-import { boardIdSelected } from 'features/gallery/store/gallerySlice';
-import { navigationApi } from 'features/ui/layouts/navigation-api';
-import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
-import { selectActiveTab } from 'features/ui/store/uiSelectors';
-import { useVideoViewerContext } from 'features/video/context/VideoViewerContext';
-import { useCaptureVideoFrame } from 'features/video/hooks/useCaptureVideoFrame';
-import { memo, useCallback, useState } from 'react';
-import { flushSync } from 'react-dom';
-import { useTranslation } from 'react-i18next';
-import { PiCameraBold, PiCrosshairBold, PiDotsThreeOutlineFill, PiSpinnerBold } from 'react-icons/pi';
-import type { VideoDTO } from 'services/api/types';
-
-export const CurrentVideoButtons = memo(({ videoDTO }: { videoDTO: VideoDTO }) => {
- const { t } = useTranslation();
- const tab = useAppSelector(selectActiveTab);
- const dispatch = useAppDispatch();
- const activeTab = useAppSelector(selectActiveTab);
- const galleryPanel = useGalleryPanel(activeTab);
- const deleteVideo = useDeleteVideo(videoDTO);
-
- const captureVideoFrame = useCaptureVideoFrame();
- const { videoRef } = useVideoViewerContext();
- const [capturing, setCapturing] = useState(false);
-
- const locateInGallery = useCallback(() => {
- navigationApi.expandRightPanel();
- galleryPanel.expand();
- flushSync(() => {
- dispatch(
- boardIdSelected({
- boardId: videoDTO.board_id ?? 'none',
- select: {
- selection: [{ type: 'video', id: videoDTO.video_id }],
- galleryView: 'videos',
- },
- })
- );
- });
- }, [dispatch, galleryPanel, videoDTO]);
-
- const onClickSaveFrame = useCallback(async () => {
- setCapturing(true);
- await captureVideoFrame(videoRef.current);
- setCapturing(false);
- }, [captureVideoFrame, videoRef]);
-
- const doesTabHaveGallery = tab === 'canvas' || tab === 'generate' || tab === 'workflows' || tab === 'upscaling';
-
- // const recallAll = useRecallAll(imageDTO);
- // const recallRemix = useRecallRemix(imageDTO);
- // const recallPrompts = useRecallPrompts(imageDTO);
- // const recallSeed = useRecallSeed(imageDTO);
- // const recallDimensions = useRecallDimensions(imageDTO);
- // const loadWorkflow = useLoadWorkflow(imageDTO);
- // const editImage = useEditImage(imageDTO);
- // const deleteImage = useDeleteImage(imageDTO);
-
- return (
- <>
-
-
-
-
- : }
- onClick={onClickSaveFrame}
- isDisabled={capturing || !videoRef}
- variant="link"
- size="sm"
- alignSelf="stretch"
- px={2}
- isLoading={capturing}
- loadingText="Capturing..."
- >
- {capturing ? 'Capturing...' : 'Save Current Frame'}
-
-
-
-
- {doesTabHaveGallery && (
- <>
- }
- aria-label={t('boards.locateInGalery')}
- tooltip={t('boards.locateInGalery')}
- onClick={locateInGallery}
- variant="link"
- size="sm"
- alignSelf="stretch"
- />
-
- >
- )}
-
-
- >
- );
-});
-
-CurrentVideoButtons.displayName = 'CurrentVideoButtons';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx
deleted file mode 100644
index 25c9806ab72..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx
+++ /dev/null
@@ -1,85 +0,0 @@
-import { Box, Flex } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import VideoMetadataViewer from 'features/gallery/components/ImageMetadataViewer/VideoMetadataViewer';
-import NextPrevItemButtons from 'features/gallery/components/NextPrevItemButtons';
-import { selectShouldShowItemDetails } from 'features/ui/store/uiSelectors';
-import { VideoView } from 'features/video/components/VideoView';
-import type { AnimationProps } from 'framer-motion';
-import { AnimatePresence, motion } from 'framer-motion';
-import { memo, useCallback, useRef, useState } from 'react';
-import type { VideoDTO } from 'services/api/types';
-
-import { NoContentForViewer } from './NoContentForViewer';
-
-export const CurrentVideoPreview = memo(({ videoDTO }: { videoDTO: VideoDTO | null }) => {
- const shouldShowItemDetails = useAppSelector(selectShouldShowItemDetails);
-
- // Show and hide the next/prev buttons on mouse move
- const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] = useState(false);
- const timeoutId = useRef(0);
- const onMouseOver = useCallback(() => {
- setShouldShowNextPrevButtons(true);
- window.clearTimeout(timeoutId.current);
- }, []);
- const onMouseOut = useCallback(() => {
- timeoutId.current = window.setTimeout(() => {
- setShouldShowNextPrevButtons(false);
- }, 500);
- }, []);
-
- return (
-
- {videoDTO && videoDTO.video_url && (
-
-
-
- )}
- {!videoDTO && }
- {shouldShowItemDetails && videoDTO && (
-
-
-
- )}
-
- {shouldShowNextPrevButtons && videoDTO && (
-
-
-
- )}
-
-
- );
-});
-CurrentVideoPreview.displayName = 'CurrentVideoPreview';
-
-const initial: AnimationProps['initial'] = {
- opacity: 0,
-};
-const animateArrows: AnimationProps['animate'] = {
- opacity: 1,
- transition: { duration: 0.07 },
-};
-const exit: AnimationProps['exit'] = {
- opacity: 0,
- transition: { duration: 0.07 },
-};
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx
index e3bf51bbdbf..80dbad347c7 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx
@@ -40,7 +40,7 @@ ImageComparisonContent.displayName = 'ImageComparisonContent';
export const ImageComparison = memo(() => {
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const lastSelectedImageDTO = useImageDTO(lastSelectedItem?.id);
+ const lastSelectedImageDTO = useImageDTO(lastSelectedItem);
const comparisonImageDTO = useImageDTO(useAppSelector(selectImageToCompare));
const [rect, setRect] = useState(null);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx
index 7123a8fbf37..edfada8bc2d 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx
@@ -1,6 +1,4 @@
import { Box, Flex, Image } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $crossOrigin } from 'app/store/nanostores/authToken';
import { useAppSelector } from 'app/store/storeHooks';
import { useBoolean } from 'common/hooks/useBoolean';
import { preventDefault } from 'common/util/stopPropagation';
@@ -14,8 +12,6 @@ import type { ComparisonProps } from './common';
import { fitDimsToContainer, getSecondImageDims } from './common';
export const ImageComparisonHover = memo(({ firstImage, secondImage, rect }: ComparisonProps) => {
- const crossOrigin = useStore($crossOrigin);
-
const comparisonFit = useAppSelector(selectComparisonFit);
const imageContainerRef = useRef(null);
const mouseOver = useBoolean(false);
@@ -57,7 +53,6 @@ export const ImageComparisonHover = memo(({ firstImage, secondImage, rect }: Com
id="image-comparison-hover-first-image"
src={firstImage.image_url}
fallbackSrc={firstImage.thumbnail_url}
- crossOrigin={crossOrigin}
w={fittedDims.width}
h={fittedDims.height}
maxW="full"
@@ -94,7 +89,6 @@ export const ImageComparisonHover = memo(({ firstImage, secondImage, rect }: Com
id="image-comparison-hover-second-image"
src={secondImage.image_url}
fallbackSrc={secondImage.thumbnail_url}
- crossOrigin={crossOrigin}
w={compareImageDims.width}
h={compareImageDims.height}
maxW={fittedDims.width}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx
index 45c4201c19d..a84e842dccc 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx
@@ -1,6 +1,4 @@
import { Flex, Image } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $crossOrigin } from 'app/store/nanostores/authToken';
import type { ComparisonProps } from 'features/gallery/components/ImageViewer/common';
import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel';
import { VerticalResizeHandle } from 'features/ui/components/tabs/ResizeHandle';
@@ -43,8 +41,6 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Comp
ImageComparisonSideBySide.displayName = 'ImageComparisonSideBySide';
const SideBySideImage = memo(({ imageDTO, type }: { imageDTO: ImageDTO; type: 'first' | 'second' }) => {
- const crossOrigin = useStore($crossOrigin);
-
return (
@@ -56,7 +52,6 @@ const SideBySideImage = memo(({ imageDTO, type }: { imageDTO: ImageDTO; type: 'f
maxH="full"
src={imageDTO.image_url}
fallbackSrc={imageDTO.thumbnail_url}
- crossOrigin={crossOrigin}
objectFit="contain"
borderRadius="base"
/>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx
index ce4bc5f083d..1f0d64aafeb 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx
@@ -1,6 +1,4 @@
import { Box, Flex, Icon, Image } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $crossOrigin } from 'app/store/nanostores/authToken';
import { useAppSelector } from 'app/store/storeHooks';
import { preventDefault } from 'common/util/stopPropagation';
import { TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern';
@@ -23,7 +21,6 @@ const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`;
export const ImageComparisonSlider = memo(({ firstImage, secondImage, rect }: ComparisonProps) => {
const comparisonFit = useAppSelector(selectComparisonFit);
- const crossOrigin = useStore($crossOrigin);
// How far the handle is from the left - this will be a CSS calculation that takes into account the handle width
const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX);
@@ -135,7 +132,6 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, rect }: Co
id="image-comparison-second-image"
src={secondImage.image_url}
fallbackSrc={secondImage.thumbnail_url}
- crossOrigin={crossOrigin}
w={compareImageDims.width}
h={compareImageDims.height}
maxW={fittedDims.width}
@@ -158,7 +154,6 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, rect }: Co
id="image-comparison-first-image"
src={firstImage.image_url}
fallbackSrc={firstImage.thumbnail_url}
- crossOrigin={crossOrigin}
w={fittedDims.width}
h={fittedDims.height}
objectFit="cover"
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx
index 72d4a79e890..ce9795ee8b0 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx
@@ -16,7 +16,7 @@ export const ImageViewer = memo(() => {
const { t } = useTranslation();
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const lastSelectedImageDTO = useImageDTO(lastSelectedItem?.type === 'image' ? lastSelectedItem.id : null);
+ const lastSelectedImageDTO = useImageDTO(lastSelectedItem ?? null);
return (
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx
index cea53931924..9e829ea8dcf 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx
@@ -6,7 +6,6 @@ import { memo } from 'react';
import { ImageViewerContextProvider } from './context';
import { ImageComparison } from './ImageComparison';
import { ImageViewer } from './ImageViewer';
-import { VideoViewer } from './VideoViewer';
const selectIsComparing = createSelector(
[selectLastSelectedItem, selectImageToCompare],
@@ -23,8 +22,7 @@ export const ImageViewerPanel = memo(() => {
// The image viewer renders progress images - if no image is selected, show the image viewer anyway
!isComparing && !lastSelectedItem &&
}
- {!isComparing && lastSelectedItem?.type === 'image' && }
- {!isComparing && lastSelectedItem?.type === 'video' && }
+ {!isComparing && }
{isComparing && }
);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx
index 7e5f9cd9cea..b963f5a80d6 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx
@@ -10,7 +10,7 @@ import { ToggleProgressButton } from './ToggleProgressButton';
export const ImageViewerToolbar = memo(() => {
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const imageDTO = useImageDTO(lastSelectedItem?.id);
+ const imageDTO = useImageDTO(lastSelectedItem);
return (
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx
index 62248a1883c..1649a14c511 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx
@@ -1,14 +1,10 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import { Alert, AlertDescription, AlertIcon, Button, Divider, Flex, Link, Spinner, Text } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { InvokeLogoIcon } from 'common/components/InvokeLogoIcon';
import { LOADING_SYMBOL, useHasImages } from 'features/gallery/hooks/useHasImages';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
-import { selectIsLocal } from 'features/system/store/configSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
-import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { PropsWithChildren } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
@@ -18,14 +14,11 @@ import { useMainModels } from 'services/api/hooks/modelsByType';
export const NoContentForViewer = memo(() => {
const hasImages = useHasImages();
const [mainModels, { data }] = useMainModels();
- const isLocal = useAppSelector(selectIsLocal);
- const isEnabled = useFeatureStatus('starterModels');
- const activeTab = useAppSelector(selectActiveTab);
const { t } = useTranslation();
const showStarterBundles = useMemo(() => {
- return isEnabled && data && mainModels.length === 0;
- }, [mainModels.length, data, isEnabled]);
+ return data && mainModels.length === 0;
+ }, [mainModels.length, data]);
if (hasImages === LOADING_SYMBOL) {
// Blank bg w/ a spinner. The new user experience components below have an invoke logo, but it's not centered.
@@ -43,11 +36,10 @@ export const NoContentForViewer = memo(() => {
- {isLocal ? : activeTab === 'workflows' ? : }
+
{showStarterBundles && }
-
- {isLocal && }
+
);
@@ -97,37 +89,6 @@ const GetStartedLocal = () => {
);
};
-const GetStartedCommercial = () => {
- return (
-
-
-
- );
-};
-
-const GetStartedWorkflows = () => {
- return (
-
-
-
- );
-};
-
-const GettingStartedVideosCallout = () => {
- return (
-
-
- ),
- }}
- />
-
- );
-};
-
const StarterBundlesCallout = () => {
const handleClickDownloadStarterModels = useCallback(() => {
navigationApi.switchToTab('models');
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx
deleted file mode 100644
index b06ac30c172..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import { Divider, Flex } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
-import { VideoViewerContextProvider } from 'features/video/context/VideoViewerContext';
-import { memo } from 'react';
-import { useVideoDTO } from 'services/api/endpoints/videos';
-
-import { CurrentVideoPreview } from './CurrentVideoPreview';
-import { VideoViewerToolbar } from './VideoViewerToolbar';
-
-export const VideoViewer = memo(() => {
- const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const videoDTO = useVideoDTO(lastSelectedItem?.type === 'video' ? lastSelectedItem.id : null);
-
- return (
-
-
-
-
-
-
-
-
-
- );
-});
-
-VideoViewer.displayName = 'VideoViewer';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx
deleted file mode 100644
index 994da11ae14..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx
+++ /dev/null
@@ -1,22 +0,0 @@
-import { Flex } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton';
-import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
-import { memo } from 'react';
-import { useVideoDTO } from 'services/api/endpoints/videos';
-
-import { CurrentVideoButtons } from './CurrentVideoButtons';
-
-export const VideoViewerToolbar = memo(() => {
- const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const videoDTO = useVideoDTO(lastSelectedItem?.type === 'video' ? lastSelectedItem.id : null);
-
- return (
-
- {videoDTO && }
- {videoDTO && }
-
- );
-});
-
-VideoViewerToolbar.displayName = 'VideoViewerToolbar';
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts
index 0953a96156b..31bb1648b8f 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts
@@ -65,7 +65,7 @@ export const getSecondImageDims = (
return { width, height };
};
export const selectComparisonImages = createMemoizedSelector(selectGallerySlice, (gallerySlice) => {
- const firstImage = gallerySlice.selection.slice(-1)[0]?.id ?? null;
+ const firstImage = gallerySlice.selection.at(-1) ?? null;
const secondImage = gallerySlice.imageToCompare;
return { firstImage, secondImage };
});
diff --git a/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx
index c7a6823f4d5..96b3699cafb 100644
--- a/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx
@@ -3,51 +3,47 @@ import { Box, IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { clamp } from 'es-toolkit/compat';
import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
-import { itemSelected } from 'features/gallery/store/gallerySlice';
+import { imageSelected } from 'features/gallery/store/gallerySlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi';
import { useGalleryImageNames } from './use-gallery-image-names';
-import { useGalleryVideoIds } from './use-gallery-video-ids';
const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineStart' | 'insetInlineEnd'] }) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
const { imageNames, isFetching } = useGalleryImageNames();
- const { videoIds, isFetching: isFetchingVideos } = useGalleryVideoIds();
const isOnFirstItem = useMemo(
- () => (lastSelectedItem ? imageNames.at(0) === lastSelectedItem.id : false),
+ () => (lastSelectedItem ? imageNames.at(0) === lastSelectedItem : false),
[imageNames, lastSelectedItem]
);
const isOnLastItem = useMemo(
- () => (lastSelectedItem ? imageNames.at(-1) === lastSelectedItem.id : false),
+ () => (lastSelectedItem ? imageNames.at(-1) === lastSelectedItem : false),
[imageNames, lastSelectedItem]
);
const onClickLeftArrow = useCallback(() => {
- const items = lastSelectedItem?.type === 'image' ? imageNames : videoIds;
- const targetIndex = lastSelectedItem ? items.findIndex((n) => n === lastSelectedItem.id) - 1 : 0;
- const clampedIndex = clamp(targetIndex, 0, items.length - 1);
- const n = items.at(clampedIndex);
+ const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem) - 1 : 0;
+ const clampedIndex = clamp(targetIndex, 0, imageNames.length - 1);
+ const n = imageNames.at(clampedIndex);
if (!n) {
return;
}
- dispatch(itemSelected({ type: lastSelectedItem?.type ?? 'image', id: n }));
- }, [dispatch, imageNames, lastSelectedItem, videoIds]);
+ dispatch(imageSelected(n));
+ }, [dispatch, imageNames, lastSelectedItem]);
const onClickRightArrow = useCallback(() => {
- const items = lastSelectedItem?.type === 'image' ? imageNames : videoIds;
- const targetIndex = lastSelectedItem ? items.findIndex((n) => n === lastSelectedItem.id) + 1 : 0;
- const clampedIndex = clamp(targetIndex, 0, items.length - 1);
- const n = items.at(clampedIndex);
+ const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem) + 1 : 0;
+ const clampedIndex = clamp(targetIndex, 0, imageNames.length - 1);
+ const n = imageNames.at(clampedIndex);
if (!n) {
return;
}
- dispatch(itemSelected({ type: lastSelectedItem?.type ?? 'image', id: n }));
- }, [dispatch, imageNames, lastSelectedItem, videoIds]);
+ dispatch(imageSelected(n));
+ }, [dispatch, imageNames, lastSelectedItem]);
return (
@@ -60,7 +56,7 @@ const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineSt
icon={}
variant="unstyled"
onClick={onClickLeftArrow}
- isDisabled={isFetching || isFetchingVideos}
+ isDisabled={isFetching}
color="base.100"
pointerEvents="auto"
insetInlineStart={inset}
@@ -75,7 +71,7 @@ const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineSt
icon={}
variant="unstyled"
onClick={onClickRightArrow}
- isDisabled={isFetching || isFetchingVideos}
+ isDisabled={isFetching}
color="base.100"
pointerEvents="auto"
insetInlineEnd={inset}
diff --git a/invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx b/invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx
deleted file mode 100644
index 766971e76ec..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx
+++ /dev/null
@@ -1,390 +0,0 @@
-import { Box, Flex, forwardRef, Grid, GridItem, Spinner, Text } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { createSelector } from '@reduxjs/toolkit';
-import { $accountTypeText } from 'app/store/nanostores/accountTypeText';
-import { useAppSelector, useAppStore } from 'app/store/storeHooks';
-import { getFocusedRegion } from 'common/hooks/focus';
-import { useRangeBasedVideoFetching } from 'features/gallery/hooks/useRangeBasedVideoFetching';
-import type { selectGetVideoIdsQueryArgs } from 'features/gallery/store/gallerySelectors';
-import { selectGalleryImageMinimumWidth, selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
-import { selectionChanged } from 'features/gallery/store/gallerySlice';
-import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
-import { selectAllowVideo } from 'features/system/store/configSlice';
-import type { MutableRefObject } from 'react';
-import React, { memo, useCallback, useEffect, useMemo, useRef } from 'react';
-import type {
- GridComponents,
- GridComputeItemKey,
- GridItemContent,
- ListRange,
- ScrollSeekConfiguration,
- VirtuosoGridHandle,
-} from 'react-virtuoso';
-import { VirtuosoGrid } from 'react-virtuoso';
-import { videosApi } from 'services/api/endpoints/videos';
-import { useDebounce } from 'use-debounce';
-
-import { getItemIndex } from './getItemIndex';
-import { getItemsPerRow } from './getItemsPerRow';
-import { GallerySelectionCountTag } from './ImageGrid/GallerySelectionCountTag';
-import { GalleryVideo } from './ImageGrid/GalleryVideo';
-import { GalleryVideoPlaceholder } from './ImageGrid/GalleryVideoPlaceholder';
-import { scrollIntoView } from './scrollIntoView';
-import { useGalleryVideoIds } from './use-gallery-video-ids';
-import { useScrollableGallery } from './useScrollableGallery';
-
-type ListVideoIdsQueryArgs = ReturnType;
-
-type GridContext = {
- queryArgs: ListVideoIdsQueryArgs;
- videoIds: string[];
-};
-
-const VideoAtPosition = memo(({ videoId }: { index: number; videoId: string }) => {
- /*
- * We rely on the useRangeBasedImageFetching to fetch all image DTOs, caching them with RTK Query.
- *
- * In this component, we just want to consume that cache. Unforutnately, RTK Query does not provide a way to
- * subscribe to a query without triggering a new fetch.
- *
- * There is a hack, though:
- * - https://github.com/reduxjs/redux-toolkit/discussions/4213
- *
- * This essentially means "subscribe to the query once it has some data".
- * One issue with this approach. When an item DTO is already cached - for example, because it is selected and
- * rendered in the viewer - it will show up in the grid before the other items have loaded. This is most
- * noticeable when first loading a board. The first item in the board is selected and rendered immediately in
- * the viewer, caching the DTO. The gallery grid renders, and that first item displays as a thumbnail while the
- * others are still placeholders. After a moment, the rest of the items load up and display as thumbnails.
- */
-
- // Use `currentData` instead of `data` to prevent a flash of previous image rendered at this index
- const { currentData: videoDTO, isUninitialized } = videosApi.endpoints.getVideoDTO.useQueryState(videoId);
- videosApi.endpoints.getVideoDTO.useQuerySubscription(videoId, { skip: isUninitialized });
-
- if (!videoDTO) {
- return ;
- }
-
- return ;
-});
-VideoAtPosition.displayName = 'VideoAtPosition';
-
-const computeItemKey: GridComputeItemKey = (index, itemId, { queryArgs }) => {
- return `${JSON.stringify(queryArgs)}-${itemId ?? index}`;
-};
-
-/**
- * Handles keyboard navigation for the gallery.
- */
-const useKeyboardNavigation = (
- itemIds: string[],
- virtuosoRef: React.RefObject,
- rootRef: React.RefObject
-) => {
- const { dispatch, getState } = useAppStore();
-
- const handleKeyDown = useCallback(
- (event: KeyboardEvent) => {
- if (getFocusedRegion() !== 'gallery') {
- // Only handle keyboard navigation when the gallery is focused
- return;
- }
- // Only handle arrow keys
- if (!['ArrowUp', 'ArrowDown', 'ArrowLeft', 'ArrowRight'].includes(event.key)) {
- return;
- }
- // Don't interfere if user is typing in an input
- if (event.target instanceof HTMLInputElement || event.target instanceof HTMLTextAreaElement) {
- return;
- }
-
- const rootEl = rootRef.current;
- const virtuosoGridHandle = virtuosoRef.current;
-
- if (!rootEl || !virtuosoGridHandle) {
- return;
- }
-
- if (itemIds.length === 0) {
- return;
- }
-
- const itemsPerRow = getItemsPerRow(rootEl);
-
- if (itemsPerRow === 0) {
- // This can happen if the grid is not yet rendered or has no items
- return;
- }
-
- event.preventDefault();
-
- const state = getState();
- const itemId = selectLastSelectedItem(state)?.id;
-
- const currentIndex = getItemIndex(itemId, itemIds);
-
- let newIndex = currentIndex;
-
- switch (event.key) {
- case 'ArrowLeft':
- if (currentIndex > 0) {
- newIndex = currentIndex - 1;
- }
- break;
- case 'ArrowRight':
- if (currentIndex < itemIds.length - 1) {
- newIndex = currentIndex + 1;
- }
- break;
- case 'ArrowUp':
- // If on first row, stay on current item
- if (currentIndex < itemsPerRow) {
- newIndex = currentIndex;
- } else {
- newIndex = Math.max(0, currentIndex - itemsPerRow);
- }
- break;
- case 'ArrowDown':
- // If no items below, stay on current item
- if (currentIndex >= itemIds.length - itemsPerRow) {
- newIndex = currentIndex;
- } else {
- newIndex = Math.min(itemIds.length - 1, currentIndex + itemsPerRow);
- }
- break;
- }
-
- if (newIndex !== currentIndex && newIndex >= 0 && newIndex < itemIds.length) {
- const nextItemId = itemIds[newIndex];
- if (nextItemId) {
- dispatch(selectionChanged([{ type: 'video', id: nextItemId }]));
- }
- }
- },
- [rootRef, virtuosoRef, itemIds, getState, dispatch]
- );
-
- useRegisteredHotkeys({
- id: 'galleryNavLeft',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavRight',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavUp',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavDown',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavLeftAlt',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavRightAlt',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavUpAlt',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-
- useRegisteredHotkeys({
- id: 'galleryNavDownAlt',
- category: 'gallery',
- callback: handleKeyDown,
- options: { preventDefault: true },
- dependencies: [handleKeyDown],
- });
-};
-
-/**
- * Keeps the last selected image in view when the gallery is scrolled.
- * This is useful for keyboard navigation and ensuring the user can see their selection.
- * It only tracks the last selected image, not the image to compare.
- */
-const useKeepSelectedVideoInView = (
- videoIds: string[],
- virtuosoRef: React.RefObject,
- rootRef: React.RefObject,
- rangeRef: MutableRefObject
-) => {
- const targetVideoId = useAppSelector(selectLastSelectedItem)?.id;
-
- useEffect(() => {
- const virtuosoGridHandle = virtuosoRef.current;
- const rootEl = rootRef.current;
- const range = rangeRef.current;
-
- if (!virtuosoGridHandle || !rootEl || !targetVideoId || !videoIds || videoIds.length === 0) {
- return;
- }
- scrollIntoView(targetVideoId, videoIds, rootEl, virtuosoGridHandle, range);
- }, [targetVideoId, videoIds, rangeRef, rootRef, virtuosoRef]);
-};
-
-export const VideoGallery = memo(() => {
- const virtuosoRef = useRef(null);
- const rangeRef = useRef({ startIndex: 0, endIndex: 0 });
- const rootRef = useRef(null);
-
- // Get the ordered list of image names - this is our primary data source for virtualization
- const { queryArgs, videoIds, isLoading } = useGalleryVideoIds();
-
- // Use range-based fetching for bulk loading image DTOs into cache based on the visible range
- const { onRangeChanged } = useRangeBasedVideoFetching({
- videoIds,
- enabled: !isLoading,
- });
-
- useKeepSelectedVideoInView(videoIds, virtuosoRef, rootRef, rangeRef);
- useKeyboardNavigation(videoIds, virtuosoRef, rootRef);
- const scrollerRef = useScrollableGallery(rootRef);
-
- /*
- * We have to keep track of the visible range for keep-selected-image-in-view functionality and push the range to
- * the range-based image fetching hook.
- */
- const handleRangeChanged = useCallback(
- (range: ListRange) => {
- rangeRef.current = range;
- onRangeChanged(range);
- },
- [onRangeChanged]
- );
-
- const context = useMemo(() => ({ videoIds, queryArgs }), [videoIds, queryArgs]);
-
- const isVideoEnabled = useAppSelector(selectAllowVideo);
- const accountTypeText = useStore($accountTypeText);
-
- if (!isVideoEnabled) {
- return (
-
-
- Video generation is not enabled for {accountTypeText} accounts
-
-
- );
- }
-
- if (isLoading) {
- return (
-
-
- Loading gallery...
-
- );
- }
-
- if (videoIds.length === 0) {
- return (
-
- No videos found
-
- );
- }
-
- return (
- // This wrapper component is necessary to initialize the overlay scrollbars!
-
-
- ref={virtuosoRef}
- context={context}
- data={videoIds}
- increaseViewportBy={4096}
- itemContent={itemContent}
- computeItemKey={computeItemKey}
- components={components}
- style={virtuosoGridStyle}
- scrollerRef={scrollerRef}
- scrollSeekConfiguration={scrollSeekConfiguration}
- rangeChanged={handleRangeChanged}
- />
-
-
- );
-});
-
-VideoGallery.displayName = 'VideoGallery';
-
-const scrollSeekConfiguration: ScrollSeekConfiguration = {
- enter: (velocity) => {
- return Math.abs(velocity) > 2048;
- },
- exit: (velocity) => {
- return velocity === 0;
- },
-};
-
-// Styles
-const virtuosoGridStyle = { height: '100%', width: '100%' };
-
-const selectGridTemplateColumns = createSelector(
- selectGalleryImageMinimumWidth,
- (galleryImageMinimumWidth) => `repeat(auto-fill, minmax(${galleryImageMinimumWidth}px, 1fr))`
-);
-
-// Grid components
-const ListComponent: GridComponents['List'] = forwardRef(({ context: _, ...rest }, ref) => {
- const _gridTemplateColumns = useAppSelector(selectGridTemplateColumns);
- const [gridTemplateColumns] = useDebounce(_gridTemplateColumns, 300);
-
- return ;
-});
-ListComponent.displayName = 'ListComponent';
-
-const itemContent: GridItemContent = (index, videoId) => {
- return ;
-};
-
-const ItemComponent: GridComponents['Item'] = forwardRef(({ context: _, ...rest }, ref) => (
-
-));
-ItemComponent.displayName = 'ItemComponent';
-
-const ScrollSeekPlaceholderComponent: GridComponents['ScrollSeekPlaceholder'] = (props) => (
-
-
-
-);
-
-ScrollSeekPlaceholderComponent.displayName = 'ScrollSeekPlaceholderComponent';
-
-const components: GridComponents = {
- Item: ItemComponent,
- List: ListComponent,
- ScrollSeekPlaceholder: ScrollSeekPlaceholderComponent,
-};
diff --git a/invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts b/invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts
deleted file mode 100644
index 5bb9c5c0b72..00000000000
--- a/invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import { EMPTY_ARRAY } from 'app/store/constants';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectGetVideoIdsQueryArgs } from 'features/gallery/store/gallerySelectors';
-import { useGetVideoIdsQuery } from 'services/api/endpoints/videos';
-import { useDebounce } from 'use-debounce';
-
-const getVideoIdsQueryOptions = {
- refetchOnReconnect: true,
- selectFromResult: ({ currentData, isLoading, isFetching }) => ({
- videoIds: currentData?.video_ids ?? EMPTY_ARRAY,
- isLoading,
- isFetching,
- }),
-} satisfies Parameters[1];
-
-export const useGalleryVideoIds = () => {
- const _queryArgs = useAppSelector(selectGetVideoIdsQueryArgs);
- const [queryArgs] = useDebounce(_queryArgs, 300);
- const { videoIds, isLoading, isFetching } = useGetVideoIdsQuery(queryArgs, getVideoIdsQueryOptions);
- return { videoIds, isLoading, isFetching, queryArgs };
-};
diff --git a/invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts b/invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts
new file mode 100644
index 00000000000..e4ebbf3ec82
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts
@@ -0,0 +1,13 @@
+import { createContext, useContext } from 'react';
+import type { ImageDTO } from 'services/api/types';
+import { assert } from 'tsafe';
+
+const ImageDTOCOntext = createContext(null);
+
+export const ImageDTOContextProvider = ImageDTOCOntext.Provider;
+
+export const useImageDTOContext = () => {
+ const dto = useContext(ImageDTOCOntext);
+ assert(dto !== null, 'useItemDTOContext must be used within ItemDTOContextProvider');
+ return dto;
+};
diff --git a/invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts b/invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts
deleted file mode 100644
index 847a1c5d07f..00000000000
--- a/invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-import { createContext, useContext } from 'react';
-import { type ImageDTO, isImageDTO, type VideoDTO } from 'services/api/types';
-import { assert } from 'tsafe';
-
-const ItemDTOContext = createContext(null);
-
-export const ItemDTOContextProvider = ItemDTOContext.Provider;
-
-export const useItemDTOContext = () => {
- const itemDTO = useContext(ItemDTOContext);
- assert(itemDTO !== null, 'useItemDTOContext must be used within ItemDTOContextProvider');
- return itemDTO;
-};
-
-export const useItemDTOContextImageOnly = (): ImageDTO => {
- const itemDTO = useContext(ItemDTOContext);
- assert(itemDTO !== null, 'useItemDTOContext must be used within ItemDTOContextProvider');
- assert(isImageDTO(itemDTO), 'ItemDTO is not an image');
- return itemDTO as ImageDTO;
-};
diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts b/invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts
deleted file mode 100644
index 4808ea6e623..00000000000
--- a/invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts
+++ /dev/null
@@ -1,78 +0,0 @@
-import { useAppStore } from 'app/store/storeHooks';
-import { useCallback, useEffect, useState } from 'react';
-import type { ListRange } from 'react-virtuoso';
-import { useGetVideoDTOsByNamesMutation, videosApi } from 'services/api/endpoints/videos';
-import { useThrottledCallback } from 'use-debounce';
-
-interface UseRangeBasedVideoFetchingArgs {
- videoIds: string[];
- enabled: boolean;
-}
-
-interface UseRangeBasedVideoFetchingReturn {
- onRangeChanged: (range: ListRange) => void;
-}
-
-const getUncachedIds = (videoIds: string[], cachedVideoIds: string[], ranges: ListRange[]): string[] => {
- const uncachedIdsSet = new Set();
- const cachedVideoIdsSet = new Set(cachedVideoIds);
-
- for (const range of ranges) {
- for (let i = range.startIndex; i <= range.endIndex; i++) {
- const id = videoIds[i]!;
- if (id && !cachedVideoIdsSet.has(id)) {
- uncachedIdsSet.add(id);
- }
- }
- }
-
- return Array.from(uncachedIdsSet);
-};
-
-/**
- * Hook for bulk fetching image DTOs based on the visible range from virtuoso.
- * Individual image components should use `useGetImageDTOQuery(imageName)` to get their specific DTO.
- * This hook ensures DTOs are bulk fetched and cached efficiently.
- */
-export const useRangeBasedVideoFetching = ({
- videoIds,
- enabled,
-}: UseRangeBasedVideoFetchingArgs): UseRangeBasedVideoFetchingReturn => {
- const store = useAppStore();
- const [getVideoDTOsByNames] = useGetVideoDTOsByNamesMutation();
- const [lastRange, setLastRange] = useState(null);
- const [pendingRanges, setPendingRanges] = useState([]);
-
- const fetchVideos = useCallback(
- (ranges: ListRange[], videoIds: string[]) => {
- if (!enabled) {
- return;
- }
- const cachedVideoIds = videosApi.util.selectCachedArgsForQuery(store.getState(), 'getVideoDTO');
- const uncachedIds = getUncachedIds(videoIds, cachedVideoIds, ranges);
- // console.log('uncachedIds', uncachedIds);
- if (uncachedIds.length === 0) {
- return;
- }
- getVideoDTOsByNames({ video_ids: uncachedIds });
- setPendingRanges([]);
- },
- [enabled, getVideoDTOsByNames, store]
- );
-
- const throttledFetchVideos = useThrottledCallback(fetchVideos, 500);
-
- const onRangeChanged = useCallback((range: ListRange) => {
- setLastRange(range);
- setPendingRanges((prev) => [...prev, range]);
- }, []);
-
- useEffect(() => {
- const combinedRanges = lastRange ? [...pendingRanges, lastRange] : pendingRanges;
- throttledFetchVideos(combinedRanges, videoIds);
- }, [videoIds, lastRange, pendingRanges, throttledFetchVideos]);
-
- return {
- onRangeChanged,
- };
-};
diff --git a/invokeai/frontend/web/src/features/gallery/store/actions.ts b/invokeai/frontend/web/src/features/gallery/store/actions.ts
deleted file mode 100644
index 8d13c449369..00000000000
--- a/invokeai/frontend/web/src/features/gallery/store/actions.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import { createAction } from '@reduxjs/toolkit';
-import type { ImageDTO } from 'services/api/types';
-
-export const sentImageToCanvas = createAction('gallery/sentImageToCanvas');
-
-export const imageDownloaded = createAction('gallery/imageDownloaded');
-
-export const imageCopiedToClipboard = createAction('gallery/imageCopiedToClipboard');
-
-export const imageOpenedInNewTab = createAction('gallery/imageOpenedInNewTab');
-
-export const imageUploadedClientSide = createAction<{
- imageDTO: ImageDTO;
- silent: boolean;
- isFirstUploadOfBatch: boolean;
-}>('gallery/imageUploadedClientSide');
diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts
index 536fbd6d2a7..aad849fdb59 100644
--- a/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts
+++ b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts
@@ -2,7 +2,7 @@ import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { selectGallerySlice } from 'features/gallery/store/gallerySlice';
import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types';
-import type { GetImageNamesArgs, GetVideoIdsArgs, ListBoardsArgs } from 'services/api/types';
+import type { GetImageNamesArgs, ListBoardsArgs } from 'services/api/types';
export const selectFirstSelectedItem = createSelector(selectGallerySlice, (gallery) => gallery.selection.at(0));
export const selectLastSelectedItem = createSelector(selectGallerySlice, (gallery) => gallery.selection.at(-1));
@@ -24,9 +24,6 @@ const selectGalleryQueryCategories = createSelector(selectGalleryView, (galleryV
if (galleryView === 'images') {
return IMAGE_CATEGORIES;
}
- if (galleryView === 'videos') {
- return [];
- }
return ASSETS_CATEGORIES;
});
const selectGallerySearchTerm = createSelector(selectGallerySlice, (gallery) => gallery.searchTerm);
@@ -51,17 +48,6 @@ export const selectGetImageNamesQueryArgs = createMemoizedSelector(
})
);
-export const selectGetVideoIdsQueryArgs = createMemoizedSelector(
- [selectSelectedBoardId, selectGallerySearchTerm, selectGalleryOrderDir, selectGalleryStarredFirst],
- (board_id, search_term, order_dir, starred_first): GetVideoIdsArgs => ({
- board_id,
- search_term,
- order_dir,
- starred_first,
- is_intermediate: false,
- })
-);
-
export const selectAutoAssignBoardOnClick = createSelector(
selectGallerySlice,
(gallery) => gallery.autoAssignBoardOnClick
diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts
index 1f99fc7a6d3..d66feefa2c9 100644
--- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts
+++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts
@@ -2,7 +2,7 @@ import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
import type { SliceConfig } from 'app/store/types';
-import { isPlainObject } from 'es-toolkit';
+import { isPlainObject, uniq } from 'es-toolkit';
import type { BoardRecordOrderBy } from 'services/api/types';
import { assert } from 'tsafe';
@@ -40,7 +40,7 @@ const slice = createSlice({
name: 'gallery',
initialState: getInitialState(),
reducers: {
- itemSelected: (state, action: PayloadAction<{ type: 'image' | 'video'; id: string } | null>) => {
+ imageSelected: (state, action: PayloadAction) => {
const selectedItem = action.payload;
if (!selectedItem) {
@@ -49,14 +49,8 @@ const slice = createSlice({
state.selection = [selectedItem];
}
},
- selectionChanged: (state, action: PayloadAction<{ type: 'image' | 'video'; id: string }[]>) => {
- const uniqueById = new Map();
- for (const item of action.payload) {
- if (!uniqueById.has(item.id)) {
- uniqueById.set(item.id, item);
- }
- }
- state.selection = Array.from(uniqueById.values());
+ selectionChanged: (state, action: PayloadAction) => {
+ state.selection = uniq(action.payload);
},
imageToCompareChanged: (state, action: PayloadAction) => {
state.imageToCompare = action.payload;
@@ -122,8 +116,8 @@ const slice = createSlice({
comparedImagesSwapped: (state) => {
if (state.imageToCompare) {
const oldSelection = state.selection;
- state.selection = [{ type: 'image', id: state.imageToCompare }];
- state.imageToCompare = oldSelection[0]?.id ?? null;
+ state.selection = [state.imageToCompare];
+ state.imageToCompare = oldSelection[0] ?? null;
}
},
comparisonFitChanged: (state, action: PayloadAction<'contain' | 'fill'>) => {
@@ -151,7 +145,7 @@ const slice = createSlice({
});
export const {
- itemSelected,
+ imageSelected,
shouldAutoSwitchChanged,
autoAssignBoardOnClickChanged,
setGalleryImageMinimumWidth,
diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts
index 0a03c7d2662..addeefe870f 100644
--- a/invokeai/frontend/web/src/features/gallery/store/types.ts
+++ b/invokeai/frontend/web/src/features/gallery/store/types.ts
@@ -1,7 +1,7 @@
import type { ImageCategory } from 'services/api/types';
import z from 'zod';
-const zGalleryView = z.enum(['images', 'assets', 'videos']);
+const zGalleryView = z.enum(['images', 'assets']);
export type GalleryView = z.infer;
const zBoardId = z.string();
// TS hack to get autocomplete for "none" but accept any string
@@ -19,7 +19,7 @@ export const IMAGE_CATEGORIES: ImageCategory[] = ['general'];
export const ASSETS_CATEGORIES: ImageCategory[] = ['control', 'mask', 'user', 'other'];
export const zGalleryState = z.object({
- selection: z.array(z.object({ type: z.enum(['image', 'video']), id: z.string() })),
+ selection: z.array(z.string()),
shouldAutoSwitch: z.boolean(),
autoAssignBoardOnClick: z.boolean(),
autoAddBoardId: zBoardId,
diff --git a/invokeai/frontend/web/src/features/imageActions/README.md b/invokeai/frontend/web/src/features/imageActions/README.md
new file mode 100644
index 00000000000..22668b29ac5
--- /dev/null
+++ b/invokeai/frontend/web/src/features/imageActions/README.md
@@ -0,0 +1,5 @@
+# Image actions
+
+This dir is (unintentionally) a dumping ground for things that we do with images. For example, adding an image as a canvas layer.
+
+Probably these functions should be moved to more appropriate places.
diff --git a/invokeai/frontend/web/src/features/imageActions/actions.ts b/invokeai/frontend/web/src/features/imageActions/actions.ts
index 14d27e900c1..2c9293127b4 100644
--- a/invokeai/frontend/web/src/features/imageActions/actions.ts
+++ b/invokeai/frontend/web/src/features/imageActions/actions.ts
@@ -42,7 +42,6 @@ import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared';
import { imageDTOToFile, imagesApi, uploadImage } from 'services/api/endpoints/images';
-import { videosApi } from 'services/api/endpoints/videos';
import type { ImageDTO } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -324,15 +323,3 @@ export const removeImagesFromBoard = (arg: { image_names: string[]; dispatch: Ap
dispatch(imagesApi.endpoints.removeImagesFromBoard.initiate({ image_names }, { track: false }));
dispatch(selectionChanged([]));
};
-
-export const addVideosToBoard = (arg: { video_ids: string[]; boardId: BoardId; dispatch: AppDispatch }) => {
- const { video_ids, boardId, dispatch } = arg;
- dispatch(videosApi.endpoints.addVideosToBoard.initiate({ video_ids, board_id: boardId }, { track: false }));
- dispatch(selectionChanged([]));
-};
-
-export const removeVideosFromBoard = (arg: { video_ids: string[]; dispatch: AppDispatch }) => {
- const { video_ids, dispatch } = arg;
- dispatch(videosApi.endpoints.removeVideosFromBoard.initiate({ video_ids }, { track: false }));
- dispatch(selectionChanged([]));
-};
diff --git a/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx b/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx
index f0d1ffe878b..748aa9ca65c 100644
--- a/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx
+++ b/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx
@@ -6,7 +6,6 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf
import type { GroupStatusMap } from 'common/components/Picker/Picker';
import { loraAdded, selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
-import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import { ModelPicker } from 'features/parameters/components/ModelPicker';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -69,11 +68,8 @@ const LoRASelect = () => {
return undefined;
}
- // Determine the group ID for the current base model
- const groupId = API_BASE_MODELS.includes(currentBaseModel) ? 'api' : currentBaseModel;
-
// Return a map with only the current base model group enabled
- return { [groupId]: true } satisfies GroupStatusMap;
+ return { [currentBaseModel]: true } satisfies GroupStatusMap;
}, [currentBaseModel]);
return (
diff --git a/invokeai/frontend/web/src/features/metadata/parsing.tsx b/invokeai/frontend/web/src/features/metadata/parsing.tsx
index a2363004ef3..d964279c2aa 100644
--- a/invokeai/frontend/web/src/features/metadata/parsing.tsx
+++ b/invokeai/frontend/web/src/features/metadata/parsing.tsx
@@ -33,32 +33,12 @@ import {
widthChanged,
} from 'features/controlLayers/store/paramsSlice';
import { refImagesRecalled } from 'features/controlLayers/store/refImagesSlice';
-import type {
- CanvasMetadata,
- LoRA,
- RefImageState,
- VideoAspectRatio as ParameterVideoAspectRatio,
- VideoDuration as ParameterVideoDuration,
- VideoResolution as ParameterVideoResolution,
-} from 'features/controlLayers/store/types';
-import {
- zCanvasMetadata,
- zCanvasReferenceImageState_OLD,
- zRefImageState,
- zVideoAspectRatio,
- zVideoDuration,
- zVideoResolution,
-} from 'features/controlLayers/store/types';
+import type { CanvasMetadata, LoRA, RefImageState } from 'features/controlLayers/store/types';
+import { zCanvasMetadata, zCanvasReferenceImageState_OLD, zRefImageState } from 'features/controlLayers/store/types';
import type { ModelIdentifierField, ModelType } from 'features/nodes/types/common';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { zModelIdentifier } from 'features/nodes/types/v2/common';
import { modelSelected } from 'features/parameters/store/actions';
-import {
- videoAspectRatioChanged,
- videoDurationChanged,
- videoModelChanged,
- videoResolutionChanged,
-} from 'features/parameters/store/videoSlice';
import type {
ParameterCFGRescaleMultiplier,
ParameterCFGScale,
@@ -714,87 +694,6 @@ const VAEModel: SingleMetadataHandler = {
};
//#endregion VAEModel
-//#region VideoModel
-const VideoModel: SingleMetadataHandler = {
- [SingleMetadataKey]: true,
- type: 'VideoModel',
- parse: async (metadata, store) => {
- const raw = getProperty(metadata, 'model');
- const parsed = await parseModelIdentifier(raw, store, 'video');
- assert(parsed.type === 'video');
- return Promise.resolve(parsed);
- },
- recall: (value, store) => {
- store.dispatch(videoModelChanged({ videoModel: value }));
- },
- i18nKey: 'metadata.videoModel',
- LabelComponent: MetadataLabel,
- ValueComponent: ({ value }: SingleMetadataValueProps) => (
-
- ),
-};
-//#endregion VideoModel
-
-//#region VideoDuration
-const VideoDuration: SingleMetadataHandler = {
- [SingleMetadataKey]: true,
- type: 'VideoDuration',
- parse: (metadata) => {
- const raw = getProperty(metadata, 'duration');
- const parsed = zVideoDuration.parse(raw);
- return Promise.resolve(parsed);
- },
- recall: (value, store) => {
- store.dispatch(videoDurationChanged(value));
- },
- i18nKey: 'metadata.videoDuration',
- LabelComponent: MetadataLabel,
- ValueComponent: ({ value }: SingleMetadataValueProps) => (
-
- ),
-};
-//#endregion VideoDuration
-
-//#region VideoResolution
-const VideoResolution: SingleMetadataHandler = {
- [SingleMetadataKey]: true,
- type: 'VideoResolution',
- parse: (metadata) => {
- const raw = getProperty(metadata, 'resolution');
- const parsed = zVideoResolution.parse(raw);
- return Promise.resolve(parsed);
- },
- recall: (value, store) => {
- store.dispatch(videoResolutionChanged(value));
- },
- i18nKey: 'metadata.videoResolution',
- LabelComponent: MetadataLabel,
- ValueComponent: ({ value }: SingleMetadataValueProps) => (
-
- ),
-};
-//#endregion VideoResolution
-
-//#region VideoAspectRatio
-const VideoAspectRatio: SingleMetadataHandler = {
- [SingleMetadataKey]: true,
- type: 'VideoAspectRatio',
- parse: (metadata) => {
- const raw = getProperty(metadata, 'aspect_ratio');
- const parsed = zVideoAspectRatio.parse(raw);
- return Promise.resolve(parsed);
- },
- recall: (value, store) => {
- store.dispatch(videoAspectRatioChanged(value));
- },
- i18nKey: 'metadata.videoAspectRatio',
- LabelComponent: MetadataLabel,
- ValueComponent: ({ value }: SingleMetadataValueProps) => (
-
- ),
-};
-//#endregion VideoAspectRatio
-
//#region LoRAs
const LoRAs: CollectionMetadataHandler = {
[CollectionMetadataKey]: true,
@@ -1044,17 +943,6 @@ export const ImageMetadataHandlers = {
// ipAdapterToIPAdapterLayer: parseIPAdapterToIPAdapterLayer,
} as const;
-export const VideoMetadataHandlers = {
- CreatedBy,
- GenerationMode,
- PositivePrompt,
- VideoModel,
- Seed,
- VideoAspectRatio,
- VideoDuration,
- VideoResolution,
-};
-
const successToast = (parameter: string) => {
toast({
id: 'PARAMETER_SET',
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts
index f3a1d0c6434..dfab2d251f9 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts
+++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts
@@ -1,88 +1,48 @@
-import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
-import { useAppSelector } from 'app/store/storeHooks';
import { isNil } from 'es-toolkit/compat';
-import { selectConfigSlice } from 'features/system/store/configSlice';
import { useMemo } from 'react';
import type { MainModelConfig } from 'services/api/types';
-const initialStatesSelector = createMemoizedSelector(selectConfigSlice, (config) => {
- const { steps, guidance, scheduler, cfgRescaleMultiplier, vaePrecision, width, height } = config.sd;
- const { guidance: fluxGuidance } = config.flux;
-
- return {
- initialSteps: steps.initial,
- initialCfg: guidance.initial,
- initialScheduler: scheduler,
- initialCfgRescaleMultiplier: cfgRescaleMultiplier.initial,
- initialVaePrecision: vaePrecision,
- initialWidth: width.initial,
- initialHeight: height.initial,
- initialGuidance: fluxGuidance.initial,
- };
-});
-
export const useMainModelDefaultSettings = (modelConfig: MainModelConfig) => {
- const {
- initialSteps,
- initialCfg,
- initialScheduler,
- initialCfgRescaleMultiplier,
- initialVaePrecision,
- initialWidth,
- initialHeight,
- initialGuidance,
- } = useAppSelector(initialStatesSelector);
-
const defaultSettingsDefaults = useMemo(() => {
return {
vae: {
isEnabled: !isNil(modelConfig?.default_settings?.vae),
- value: modelConfig?.default_settings?.vae || 'default',
+ value: modelConfig?.default_settings?.vae ?? 'default',
},
vaePrecision: {
isEnabled: !isNil(modelConfig?.default_settings?.vae_precision),
- value: modelConfig?.default_settings?.vae_precision || initialVaePrecision || 'fp32',
+ value: modelConfig?.default_settings?.vae_precision ?? 'fp32',
},
scheduler: {
isEnabled: !isNil(modelConfig?.default_settings?.scheduler),
- value: modelConfig?.default_settings?.scheduler || initialScheduler || 'dpmpp_3m_k',
+ value: modelConfig?.default_settings?.scheduler ?? 'dpmpp_3m_k',
},
steps: {
isEnabled: !isNil(modelConfig?.default_settings?.steps),
- value: modelConfig?.default_settings?.steps || initialSteps,
+ value: modelConfig?.default_settings?.steps ?? 30,
},
cfgScale: {
isEnabled: !isNil(modelConfig?.default_settings?.cfg_scale),
- value: modelConfig?.default_settings?.cfg_scale || initialCfg,
+ value: modelConfig?.default_settings?.cfg_scale ?? 7,
},
cfgRescaleMultiplier: {
isEnabled: !isNil(modelConfig?.default_settings?.cfg_rescale_multiplier),
- value: modelConfig?.default_settings?.cfg_rescale_multiplier || initialCfgRescaleMultiplier,
+ value: modelConfig?.default_settings?.cfg_rescale_multiplier ?? 0,
},
width: {
isEnabled: !isNil(modelConfig?.default_settings?.width),
- value: modelConfig?.default_settings?.width || initialWidth,
+ value: modelConfig?.default_settings?.width ?? 512,
},
height: {
isEnabled: !isNil(modelConfig?.default_settings?.height),
- value: modelConfig?.default_settings?.height || initialHeight,
+ value: modelConfig?.default_settings?.height ?? 512,
},
guidance: {
isEnabled: !isNil(modelConfig?.default_settings?.guidance),
- value: modelConfig?.default_settings?.guidance || initialGuidance,
+ value: modelConfig?.default_settings?.guidance ?? 4,
},
};
- }, [
- modelConfig,
- initialVaePrecision,
- initialScheduler,
- initialSteps,
- initialCfg,
- initialCfgRescaleMultiplier,
- initialWidth,
- initialHeight,
- initialGuidance,
- ]);
+ }, [modelConfig]);
return defaultSettingsDefaults;
};
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx
index c3de0dd3fb5..f7e91af62fd 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx
@@ -1,6 +1,5 @@
import { Button, Text, useToast } from '@invoke-ai/ui-library';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useCallback, useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
@@ -10,7 +9,6 @@ const TOAST_ID = 'starterModels';
export const useStarterModelsToast = () => {
const { t } = useTranslation();
- const isEnabled = useFeatureStatus('starterModels');
const [didToast, setDidToast] = useState(false);
const [mainModels, { data }] = useMainModels();
const toast = useToast();
@@ -23,7 +21,7 @@ export const useStarterModelsToast = () => {
toast.close(TOAST_ID);
}
}
- if (data && mainModels.length === 0 && !didToast && isEnabled) {
+ if (data && mainModels.length === 0 && !didToast) {
toast({
id: TOAST_ID,
title: t('modelManager.noModelsInstalled'),
@@ -34,7 +32,7 @@ export const useStarterModelsToast = () => {
onCloseComplete: () => setDidToast(true),
});
}
- }, [data, didToast, isEnabled, mainModels.length, t, toast]);
+ }, [data, didToast, mainModels.length, t, toast]);
};
const ToastDescription = () => {
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts
index 0b4096e010b..11b19b3937f 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts
+++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts
@@ -18,7 +18,6 @@ import {
isTIModelConfig,
isUnknownModelConfig,
isVAEModelConfig,
- isVideoModelConfig,
} from 'services/api/types';
import { objectEntries } from 'tsafe';
@@ -116,11 +115,6 @@ export const MODEL_CATEGORIES: Record =
i18nKey: 'modelManager.llavaOnevision',
filter: isLLaVAModelConfig,
},
- video: {
- category: 'video',
- i18nKey: 'Video',
- filter: isVideoModelConfig,
- },
};
export const MODEL_CATEGORIES_AS_LIST = objectEntries(MODEL_CATEGORIES).map(([category, { i18nKey, filter }]) => ({
@@ -141,13 +135,6 @@ export const MODEL_BASE_TO_COLOR: Record = {
'sdxl-refiner': 'invokeBlue',
flux: 'gold',
cogview4: 'red',
- imagen3: 'pink',
- imagen4: 'pink',
- 'chatgpt-4o': 'pink',
- 'flux-kontext': 'pink',
- 'gemini-2.5': 'pink',
- veo3: 'purple',
- runway: 'green',
unknown: 'red',
};
@@ -171,7 +158,6 @@ export const MODEL_TYPE_TO_LONG_NAME: Record = {
clip_embed: 'CLIP Embed',
siglip: 'SigLIP',
flux_redux: 'FLUX Redux',
- video: 'Video',
unknown: 'Unknown',
};
@@ -187,13 +173,6 @@ export const MODEL_BASE_TO_LONG_NAME: Record = {
'sdxl-refiner': 'Stable Diffusion XL Refiner',
flux: 'FLUX',
cogview4: 'CogView4',
- imagen3: 'Imagen3',
- imagen4: 'Imagen4',
- 'chatgpt-4o': 'ChatGPT 4o',
- 'flux-kontext': 'Flux Kontext',
- 'gemini-2.5': 'Gemini 2.5',
- veo3: 'Veo3',
- runway: 'Runway',
unknown: 'Unknown',
};
@@ -209,13 +188,6 @@ export const MODEL_BASE_TO_SHORT_NAME: Record = {
'sdxl-refiner': 'SDXLR',
flux: 'FLUX',
cogview4: 'CogView4',
- imagen3: 'Imagen3',
- imagen4: 'Imagen4',
- 'chatgpt-4o': 'ChatGPT 4o',
- 'flux-kontext': 'Flux Kontext',
- 'gemini-2.5': 'Gemini 2.5',
- veo3: 'Veo3',
- runway: 'Runway',
unknown: 'Unknown',
};
@@ -244,60 +216,11 @@ export const MODEL_FORMAT_TO_LONG_NAME: Record = {
bnb_quantized_int8b: 'BNB Quantized (int8b)',
bnb_quantized_nf4b: 'BNB Quantized (nf4b)',
gguf_quantized: 'GGUF Quantized',
- api: 'API',
unknown: 'Unknown',
};
-/**
- * List of base models that make API requests
- */
-export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5'];
-
-export const SUPPORTS_SEED_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4'];
-
export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3'];
-export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = [
- 'sd-1',
- 'sdxl',
- 'flux',
- 'flux-kontext',
- 'chatgpt-4o',
- 'gemini-2.5',
-];
-
-export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [
- 'sd-1',
- 'sd-2',
- 'sdxl',
- 'cogview4',
- 'sd-3',
- 'imagen3',
- 'imagen4',
-];
-
-export const SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS: BaseModelType[] = [
- 'sd-1',
- 'sd-2',
- 'sd-3',
- 'sdxl',
- 'flux',
- 'cogview4',
-];
-
-export const SUPPORTS_ASPECT_RATIO_BASE_MODELS: BaseModelType[] = [
- 'sd-1',
- 'sd-2',
- 'sd-3',
- 'sdxl',
- 'flux',
- 'cogview4',
- 'imagen3',
- 'imagen4',
- 'flux-kontext',
- 'chatgpt-4o',
-];
-
-export const VIDEO_BASE_MODELS = ['veo3', 'runway'];
+export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = ['sd-1', 'sdxl', 'flux'];
-export const REQUIRES_STARTING_FRAME_BASE_MODELS = ['runway'];
+export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sdxl', 'cogview4', 'sd-3'];
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx
index cdf8bceaf97..5d68f3fdc5b 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx
@@ -8,8 +8,6 @@ import {
FormLabel,
Input,
} from '@invoke-ai/ui-library';
-import { skipToken } from '@reduxjs/toolkit/query';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { toast } from 'features/toast/toast';
import type { ChangeEvent } from 'react';
import { memo, useCallback, useMemo, useState } from 'react';
@@ -24,8 +22,7 @@ import { assert } from 'tsafe';
export const HFToken = () => {
const { t } = useTranslation();
- const isHFTokenEnabled = useFeatureStatus('hfToken');
- const { currentData } = useGetHFTokenStatusQuery(isHFTokenEnabled ? undefined : skipToken);
+ const { currentData } = useGetHFTokenStatusQuery();
const error = useMemo(() => {
switch (currentData) {
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx
index 0875256d463..ba1d80aa007 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx
@@ -1,6 +1,5 @@
import { Button, Flex, FormControl, FormErrorMessage, FormHelperText, FormLabel, Input } from '@invoke-ai/ui-library';
import { useInstallModel } from 'features/modelManagerV2/hooks/useInstallModel';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import type { ChangeEventHandler } from 'react';
import { memo, useCallback, useState } from 'react';
import { useTranslation } from 'react-i18next';
@@ -14,7 +13,6 @@ export const HuggingFaceForm = memo(() => {
const [displayResults, setDisplayResults] = useState(false);
const [errorMessage, setErrorMessage] = useState('');
const { t } = useTranslation();
- const isHFTokenEnabled = useFeatureStatus('hfToken');
const [_getHuggingFaceModels, { isLoading, data }] = useLazyGetHuggingFaceModelsQuery();
const [installModel] = useInstallModel();
@@ -66,7 +64,7 @@ export const HuggingFaceForm = memo(() => {
{t('modelManager.huggingFaceHelper')}
{!!errorMessage.length && {errorMessage}}
- {isHFTokenEnabled && }
+
{data && data.urls && displayResults && }
);
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx
index e139639f1f0..2d0192425dc 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx
@@ -17,7 +17,6 @@ const FORMAT_NAME_MAP: Record = {
bnb_quantized_int8b: 'bnb_quantized_int8b',
bnb_quantized_nf4b: 'quantized',
gguf_quantized: 'gguf',
- api: 'api',
omi: 'omi',
unknown: 'unknown',
olive: 'olive',
@@ -36,7 +35,6 @@ const FORMAT_COLOR_MAP: Record = {
bnb_quantized_int8b: 'base',
bnb_quantized_nf4b: 'base',
gguf_quantized: 'base',
- api: 'base',
unknown: 'red',
olive: 'base',
onnx: 'base',
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx
index 2fa9580e421..ca7684ae859 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx
@@ -1,8 +1,7 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle';
-import { selectCFGRescaleMultiplierConfig } from 'features/system/store/configSlice';
+import { CONSTRAINTS } from 'features/parameters/components/Advanced/ParamCFGRescaleMultiplier';
import { memo, useCallback, useMemo } from 'react';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
@@ -15,12 +14,7 @@ type DefaultCfgRescaleMultiplierType = MainModelDefaultSettingsFormData['cfgResc
export const DefaultCfgRescaleMultiplier = memo((props: UseControllerProps) => {
const { field } = useController(props);
- const config = useAppSelector(selectCFGRescaleMultiplierConfig);
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback(
(v: number) => {
@@ -53,20 +47,20 @@ export const DefaultCfgRescaleMultiplier = memo((props: UseControllerProps
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx
index 78558243ae6..76f698b8f7b 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx
@@ -1,8 +1,7 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle';
-import { selectCFGScaleConfig } from 'features/system/store/configSlice';
+import { CONSTRAINTS, MARKS } from 'features/parameters/components/Core/ParamCFGScale';
import { memo, useCallback, useMemo } from 'react';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
@@ -15,12 +14,7 @@ type DefaultCfgType = MainModelDefaultSettingsFormData['cfgScale'];
export const DefaultCfgScale = memo((props: UseControllerProps) => {
const { field } = useController(props);
- const config = useAppSelector(selectCFGScaleConfig);
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback(
(v: number) => {
@@ -53,20 +47,20 @@ export const DefaultCfgScale = memo((props: UseControllerProps
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx
index cb939a268cd..df8c62cbf8a 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx
@@ -1,8 +1,7 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle';
-import { selectGuidanceConfig } from 'features/system/store/configSlice';
+import { CONSTRAINTS, MARKS } from 'features/parameters/components/Core/ParamGuidance';
import { memo, useCallback, useMemo } from 'react';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
@@ -15,16 +14,7 @@ type DefaultGuidanceType = MainModelDefaultSettingsFormData['guidance'];
export const DefaultGuidance = memo((props: UseControllerProps) => {
const { field } = useController(props);
- const config = useAppSelector(selectGuidanceConfig);
const { t } = useTranslation();
- const marks = useMemo(
- () => [
- config.sliderMin,
- Math.floor(config.sliderMax - (config.sliderMax - config.sliderMin) / 2),
- config.sliderMax,
- ],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback(
(v: number) => {
@@ -57,20 +47,20 @@ export const DefaultGuidance = memo((props: UseControllerProps
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx
index 78706b8223d..7603007d5a0 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx
@@ -1,8 +1,7 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle';
-import { selectHeightConfig } from 'features/system/store/configSlice';
+import { CONSTRAINTS } from 'features/parameters/components/Dimensions/DimensionsHeight';
import { memo, useCallback, useMemo } from 'react';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
@@ -19,12 +18,8 @@ type Props = {
export const DefaultHeight = memo(({ control, optimalDimension }: Props) => {
const { field } = useController({ control, name: 'height' });
- const config = useAppSelector(selectHeightConfig);
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, optimalDimension, config.sliderMax],
- [config.sliderMin, optimalDimension, config.sliderMax]
- );
+ const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]);
const onChange = useCallback(
(v: number) => {
@@ -57,20 +52,20 @@ export const DefaultHeight = memo(({ control, optimalDimension }: Props) => {
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx
index 42afa6a1074..0e052f569df 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx
@@ -1,8 +1,7 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle';
-import { selectStepsConfig } from 'features/system/store/configSlice';
+import { CONSTRAINTS, MARKS } from 'features/parameters/components/Core/ParamSteps';
import { memo, useCallback, useMemo } from 'react';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
@@ -15,12 +14,7 @@ type DefaultSteps = MainModelDefaultSettingsFormData['steps'];
export const DefaultSteps = memo((props: UseControllerProps) => {
const { field } = useController(props);
- const config = useAppSelector(selectStepsConfig);
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback(
(v: number) => {
@@ -53,20 +47,20 @@ export const DefaultSteps = memo((props: UseControllerProps
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx
index 66467617d02..4ffc6e8f8fb 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx
@@ -1,8 +1,7 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle';
-import { selectWidthConfig } from 'features/system/store/configSlice';
+import { CONSTRAINTS } from 'features/parameters/components/Dimensions/DimensionsWidth';
import { memo, useCallback, useMemo } from 'react';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
@@ -19,12 +18,8 @@ type Props = {
export const DefaultWidth = memo(({ control, optimalDimension }: Props) => {
const { field } = useController({ control, name: 'width' });
- const config = useAppSelector(selectWidthConfig);
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, optimalDimension, config.sliderMax],
- [config.sliderMin, optimalDimension, config.sliderMax]
- );
+ const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]);
const onChange = useCallback(
(v: number) => {
@@ -57,20 +52,20 @@ export const DefaultWidth = memo(({ control, optimalDimension }: Props) => {
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx
index 55c683029fd..a9a502e7795 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx
@@ -19,7 +19,6 @@ import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { memoize } from 'es-toolkit/compat';
import { useBuildNode } from 'features/nodes/hooks/useBuildNode';
-import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import {
$addNodeCmdk,
$cursorPos,
@@ -147,7 +146,6 @@ export const AddNodeCmdk = memo(() => {
const [searchTerm, setSearchTerm] = useState('');
const addNode = useAddNode();
const tab = useAppSelector(selectActiveTab);
- const isLocked = useIsWorkflowEditorLocked();
// Filtering the list is expensive - debounce the search term to avoid stutters
const [debouncedSearchTerm] = useDebounce(searchTerm, 300);
const isOpen = useStore($addNodeCmdk);
@@ -162,8 +160,8 @@ export const AddNodeCmdk = memo(() => {
id: 'addNode',
category: 'workflows',
callback: open,
- options: { enabled: tab === 'workflows' && !isLocked, preventDefault: true },
- dependencies: [open, tab, isLocked],
+ options: { enabled: tab === 'workflows', preventDefault: true },
+ dependencies: [open, tab],
});
const onChange = useCallback((e: ChangeEvent) => {
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx
index c6f29095e44..f6474dec74b 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx
@@ -4,7 +4,6 @@ import type {
EdgeChange,
HandleType,
NodeChange,
- NodeMouseHandler,
OnEdgesChange,
OnInit,
OnMoveEnd,
@@ -23,10 +22,8 @@ import {
} from '@xyflow/react';
import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
-import { $isSelectingOutputNode, $outputNodeId } from 'features/nodes/components/sidePanel/workflow/publish';
import { useConnection } from 'features/nodes/hooks/useConnection';
import { useIsValidConnection } from 'features/nodes/hooks/useIsValidConnection';
-import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { useNodeCopyPaste } from 'features/nodes/hooks/useNodeCopyPaste';
import {
$addNodeCmdk,
@@ -52,7 +49,7 @@ import {
import { connectionToEdge } from 'features/nodes/store/util/reactFlowUtil';
import { selectSelectionMode, selectShouldSnapToGrid } from 'features/nodes/store/workflowSettingsSlice';
import { NO_DRAG_CLASS, NO_PAN_CLASS, NO_WHEEL_CLASS } from 'features/nodes/types/constants';
-import { type AnyEdge, type AnyNode, isInvocationNode } from 'features/nodes/types/invocation';
+import type { AnyEdge, AnyNode } from 'features/nodes/types/invocation';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import type { CSSProperties, MouseEvent } from 'react';
import { memo, useCallback, useMemo, useRef } from 'react';
@@ -94,7 +91,6 @@ export const Flow = memo(() => {
const flowWrapper = useRef(null);
const isValidConnection = useIsValidConnection();
const updateNodeInternals = useUpdateNodeInternals();
- const isLocked = useIsWorkflowEditorLocked();
useFocusRegion('workflows', flowWrapper);
@@ -212,18 +208,6 @@ export const Flow = memo(() => {
// #endregion
- const onNodeClick = useCallback>((e, node) => {
- if (!$isSelectingOutputNode.get()) {
- return;
- }
- if (!isInvocationNode(node)) {
- return;
- }
- const { id } = node.data;
- $outputNodeId.set(id);
- $isSelectingOutputNode.set(false);
- }, []);
-
return (
<>
@@ -235,7 +219,6 @@ export const Flow = memo(() => {
nodes={nodes}
edges={edges}
onInit={onInit}
- onNodeClick={onNodeClick}
onMouseMove={onMouseMove}
onNodesChange={onNodesChange}
onEdgesChange={onEdgesChange}
@@ -248,12 +231,6 @@ export const Flow = memo(() => {
onMoveEnd={handleMoveEnd}
connectionLineComponent={CustomConnectionLine}
isValidConnection={isValidConnection}
- edgesFocusable={!isLocked}
- edgesReconnectable={!isLocked}
- nodesDraggable={!isLocked}
- nodesConnectable={!isLocked}
- nodesFocusable={!isLocked}
- elementsSelectable={!isLocked}
minZoom={0.1}
snapToGrid={shouldSnapToGrid}
snapGrid={snapGrid}
@@ -279,8 +256,6 @@ export const Flow = memo(() => {
Flow.displayName = 'Flow';
const HotkeyIsolator = memo(() => {
- const isLocked = useIsWorkflowEditorLocked();
-
const mayUndo = useAppSelector(selectMayUndo);
const mayRedo = useAppSelector(selectMayRedo);
@@ -295,7 +270,7 @@ const HotkeyIsolator = memo(() => {
id: 'copySelection',
category: 'workflows',
callback: copySelection,
- options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true },
+ options: { enabled: isWorkflowsFocused, preventDefault: true },
dependencies: [copySelection],
});
@@ -324,24 +299,24 @@ const HotkeyIsolator = memo(() => {
id: 'selectAll',
category: 'workflows',
callback: selectAll,
- options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true },
- dependencies: [selectAll, isWorkflowsFocused, isLocked],
+ options: { enabled: isWorkflowsFocused, preventDefault: true },
+ dependencies: [selectAll, isWorkflowsFocused],
});
useRegisteredHotkeys({
id: 'pasteSelection',
category: 'workflows',
callback: pasteSelection,
- options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true },
- dependencies: [pasteSelection, isLocked, isWorkflowsFocused],
+ options: { enabled: isWorkflowsFocused, preventDefault: true },
+ dependencies: [pasteSelection, isWorkflowsFocused],
});
useRegisteredHotkeys({
id: 'pasteSelectionWithEdges',
category: 'workflows',
callback: pasteSelectionWithEdges,
- options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true },
- dependencies: [pasteSelectionWithEdges, isLocked, isWorkflowsFocused],
+ options: { enabled: isWorkflowsFocused, preventDefault: true },
+ dependencies: [pasteSelectionWithEdges, isWorkflowsFocused],
});
useRegisteredHotkeys({
@@ -350,8 +325,8 @@ const HotkeyIsolator = memo(() => {
callback: () => {
store.dispatch(undo());
},
- options: { enabled: isWorkflowsFocused && !isLocked && mayUndo, preventDefault: true },
- dependencies: [store, mayUndo, isLocked, isWorkflowsFocused],
+ options: { enabled: isWorkflowsFocused && mayUndo, preventDefault: true },
+ dependencies: [store, mayUndo, isWorkflowsFocused],
});
useRegisteredHotkeys({
@@ -360,8 +335,8 @@ const HotkeyIsolator = memo(() => {
callback: () => {
store.dispatch(redo());
},
- options: { enabled: isWorkflowsFocused && !isLocked && mayRedo, preventDefault: true },
- dependencies: [store, mayRedo, isLocked, isWorkflowsFocused],
+ options: { enabled: isWorkflowsFocused && mayRedo, preventDefault: true },
+ dependencies: [store, mayRedo, isWorkflowsFocused],
});
const onEscapeHotkey = useCallback(() => {
@@ -398,8 +373,8 @@ const HotkeyIsolator = memo(() => {
id: 'deleteSelection',
category: 'workflows',
callback: deleteSelection,
- options: { preventDefault: true, enabled: isWorkflowsFocused && !isLocked },
- dependencies: [deleteSelection, isWorkflowsFocused, isLocked],
+ options: { preventDefault: true, enabled: isWorkflowsFocused },
+ dependencies: [deleteSelection, isWorkflowsFocused],
});
return null;
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx
index 2e77415757c..c8423a8fe4e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx
@@ -19,7 +19,7 @@ import { $lastProgressEvent } from 'services/events/stores';
const CurrentImageNode = (props: NodeProps) => {
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
const lastProgressEvent = useStore($lastProgressEvent);
- const imageDTO = useImageDTO(lastSelectedItem?.id);
+ const imageDTO = useImageDTO(lastSelectedItem);
if (lastProgressEvent?.image) {
return (
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx
index 851b85880f2..890666b0c4e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx
@@ -3,7 +3,6 @@ import { Flex, FormControlGroup } from '@invoke-ai/ui-library';
import { useIsExecutableNode } from 'features/nodes/hooks/useIsBatchNode';
import { useNodeHasImageOutput } from 'features/nodes/hooks/useNodeHasImageOutput';
import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo } from 'react';
import SaveToGalleryCheckbox from './SaveToGalleryCheckbox';
@@ -18,7 +17,6 @@ const props: ChakraProps = { w: 'unset' };
const InvocationNodeFooter = ({ nodeId }: Props) => {
const hasImageOutput = useNodeHasImageOutput();
const isExecutableNode = useIsExecutableNode();
- const isCacheEnabled = useFeatureStatus('invocationCache');
return (
{
justifyContent="space-between"
>
- {isExecutableNode && isCacheEnabled && }
+ {isExecutableNode && }
{isExecutableNode && hasImageOutput && }
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx
index 93b2518ca03..ba3282459fd 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx
@@ -8,7 +8,6 @@ import {
useIsConnectionStartField,
} from 'features/nodes/hooks/useFieldConnectionState';
import { useInputFieldTemplateOrThrow } from 'features/nodes/hooks/useInputFieldTemplateOrThrow';
-import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { useFieldTypeName } from 'features/nodes/hooks/usePrettyFieldType';
import { HANDLE_TOOLTIP_OPEN_DELAY } from 'features/nodes/types/constants';
import type { FieldInputTemplate } from 'features/nodes/types/field';
@@ -106,16 +105,9 @@ type HandleCommonProps = {
};
const IdleHandle = memo(({ fieldTemplate, fieldTypeName, fieldColor, isModelField }: HandleCommonProps) => {
- const isLocked = useIsWorkflowEditorLocked();
return (
-
+
{
if (connectionError !== null) {
@@ -149,13 +140,7 @@ const ConnectionInProgressHandle = memo(
return (
-
+
{
- const isLocked = useIsWorkflowEditorLocked();
-
return (
-
+
{
if (connectionErrorTKey !== null) {
@@ -150,13 +140,7 @@ const ConnectionInProgressHandle = memo(
return (
-
+
{
const mouseOverNode = useMouseOverNode(nodeId);
const mouseOverFormField = useMouseOverFormField(nodeId);
const zoomToNode = useZoomToNode(nodeId);
- const isLocked = useIsWorkflowEditorLocked();
const isInvalid = useNodeHasErrors();
const hasError = isMissingTemplate || isInvalid;
@@ -74,7 +72,6 @@ const NodeWrapper = (props: NodeWrapperProps) => {
sx={containerSx}
width={width || NODE_WIDTH}
opacity={opacity}
- data-is-editor-locked={isLocked}
data-is-selected={selected}
data-is-mouse-over-form-field={mouseOverFormField.isMouseOverFormField}
data-status={hasError ? 'error' : needsUpdate ? 'warning' : undefined}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx
index 885c4e5f146..7e2cde7093f 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx
@@ -1,7 +1,6 @@
import type { ChakraProps } from '@invoke-ai/ui-library';
import { Box, useGlobalMenuClose } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
-import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { useMouseOverNode } from 'features/nodes/hooks/useMouseOverNode';
import { useNodeExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { useZoomToNode } from 'features/nodes/hooks/useZoomToNode';
@@ -23,7 +22,6 @@ const NonInvocationNodeWrapper = (props: NonInvocationNodeWrapperProps) => {
const { nodeId, width, children, selected } = props;
const mouseOverNode = useMouseOverNode(nodeId);
const zoomToNode = useZoomToNode(nodeId);
- const isLocked = useIsWorkflowEditorLocked();
const executionState = useNodeExecutionState(nodeId);
const isInProgress = executionState?.status === zNodeStatus.enum.IN_PROGRESS;
@@ -66,7 +64,6 @@ const NonInvocationNodeWrapper = (props: NonInvocationNodeWrapperProps) => {
sx={containerSx}
width={width || NODE_WIDTH}
opacity={opacity}
- data-is-editor-locked={isLocked}
data-is-selected={selected}
>
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts
index 721c816b198..70e56cb4db6 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts
@@ -56,12 +56,6 @@ export const containerSx: SystemStyleObject = {
display: 'block',
shadow: '0 0 0 2px var(--border-color-selected)',
},
- '&[data-is-editor-locked="true"]': {
- '& *': {
- cursor: 'not-allowed',
- pointerEvents: 'none',
- },
- },
};
export const shadowsSx: SystemStyleObject = {
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx
index 7320c1fce77..2aaa79243c0 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx
@@ -1,61 +1,15 @@
-import { Alert, AlertDescription, AlertIcon, AlertTitle, Box, Flex } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
+import { Flex } from '@invoke-ai/ui-library';
import AddNodeButton from 'features/nodes/components/flow/panels/TopPanel/AddNodeButton';
import UpdateNodesButton from 'features/nodes/components/flow/panels/TopPanel/UpdateNodesButton';
-import {
- $isInPublishFlow,
- $isSelectingOutputNode,
- useIsValidationRunInProgress,
- useIsWorkflowPublished,
-} from 'features/nodes/components/sidePanel/workflow/publish';
-import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
export const TopLeftPanel = memo(() => {
- const isLocked = useIsWorkflowEditorLocked();
- const isInPublishFlow = useStore($isInPublishFlow);
- const isPublished = useIsWorkflowPublished();
- const isValidationRunInProgress = useIsValidationRunInProgress();
- const isSelectingOutputNode = useStore($isSelectingOutputNode);
-
- const { t } = useTranslation();
return (
- {!isLocked && (
-
-
-
-
- )}
- {isLocked && (
-
-
-
- {t('workflows.builder.workflowLocked')}
- {isValidationRunInProgress && (
-
- {t('workflows.builder.publishingValidationRunInProgress')}
-
- )}
- {isInPublishFlow && !isValidationRunInProgress && !isSelectingOutputNode && (
-
- {t('workflows.builder.workflowLockedDuringPublishing')}
-
- )}
- {isInPublishFlow && !isValidationRunInProgress && isSelectingOutputNode && (
-
- {t('workflows.builder.selectingOutputNodeDesc')}
-
- )}
- {isPublished && (
-
- {t('workflows.builder.workflowLockedPublished')}
-
- )}
-
-
- )}
+
+
+
+
);
});
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx
index af778d3a9fb..5d4977db8ef 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx
@@ -2,21 +2,15 @@ import { Flex, IconButton } from '@invoke-ai/ui-library';
import ClearFlowButton from 'features/nodes/components/flow/panels/TopPanel/ClearFlowButton';
import SaveWorkflowButton from 'features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton';
import { useWorkflowEditorSettingsModal } from 'features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings';
-import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiGearSixFill } from 'react-icons/pi';
export const TopRightPanel = memo(() => {
const modal = useWorkflowEditorSettingsModal();
- const isLocked = useIsWorkflowEditorLocked();
const { t } = useTranslation();
- if (isLocked) {
- return null;
- }
-
return (
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx
deleted file mode 100644
index d9df276f1b1..00000000000
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx
+++ /dev/null
@@ -1,50 +0,0 @@
-import { Button, Flex, Heading, Text } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectWorkflowId } from 'features/nodes/store/selectors';
-import { toast } from 'features/toast/toast';
-import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiCopyBold, PiLockOpenBold } from 'react-icons/pi';
-import { useUnpublishWorkflowMutation } from 'services/api/endpoints/workflows';
-
-export const PublishedWorkflowPanelContent = memo(() => {
- const { t } = useTranslation();
- const saveAs = useSaveOrSaveAsWorkflow();
- const [unpublishWorkflow] = useUnpublishWorkflowMutation();
- const workflowId = useAppSelector(selectWorkflowId);
-
- const handleUnpublish = useCallback(async () => {
- if (workflowId) {
- try {
- await unpublishWorkflow(workflowId).unwrap();
- toast({
- title: t('toast.workflowUnpublished'),
- status: 'success',
- });
- } catch {
- toast({
- title: t('toast.problemUnpublishingWorkflow'),
- description: t('toast.problemUnpublishingWorkflowDescription'),
- status: 'error',
- });
- }
- }
- }, [unpublishWorkflow, workflowId, t]);
-
- return (
-
-
- {t('workflows.builder.workflowLocked')}
-
- {t('workflows.builder.publishedWorkflowsLocked')}
- }>
- {t('common.saveAs')}
-
- }>
- {t('workflows.builder.unpublish')}
-
-
- );
-});
-PublishedWorkflowPanelContent.displayName = 'PublishedWorkflowPanelContent';
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx
index dc8af90b176..f947e3165a8 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx
@@ -1,6 +1,5 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
-import { useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish';
import { WorkflowListMenuTrigger } from 'features/nodes/components/sidePanel/WorkflowListMenu/WorkflowListMenuTrigger';
import { WorkflowViewEditToggleButton } from 'features/nodes/components/sidePanel/WorkflowViewEditToggleButton';
import { selectWorkflowMode } from 'features/nodes/store/workflowLibrarySlice';
@@ -11,13 +10,12 @@ import SaveWorkflowButton from './SaveWorkflowButton';
export const ActiveWorkflowNameAndActions = memo(() => {
const mode = useAppSelector(selectWorkflowMode);
- const isPublished = useIsWorkflowPublished();
return (
- {mode === 'edit' && !isPublished && }
+ {mode === 'edit' && }
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx
index 9f60a1d7a59..a31b71a4d44 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx
@@ -1,10 +1,6 @@
import { Flex } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { EditModeLeftPanelContent } from 'features/nodes/components/sidePanel/EditModeLeftPanelContent';
-import { PublishedWorkflowPanelContent } from 'features/nodes/components/sidePanel/PublishedWorkflowPanelContent';
-import { $isInPublishFlow, useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish';
-import { PublishWorkflowPanelContent } from 'features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent';
import { ActiveWorkflowDescription } from 'features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowDescription';
import { ActiveWorkflowNameAndActions } from 'features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions';
import { selectWorkflowMode } from 'features/nodes/store/workflowLibrarySlice';
@@ -15,19 +11,15 @@ import { ViewModeLeftPanelContent } from './viewMode/ViewModeLeftPanelContent';
const WorkflowsTabLeftPanel = () => {
const mode = useAppSelector(selectWorkflowMode);
- const isPublished = useIsWorkflowPublished();
- const isInPublishFlow = useStore($isInPublishFlow);
return (
- {isInPublishFlow && }
- {!isInPublishFlow && }
- {!isInPublishFlow && !isPublished && mode === 'view' && }
- {!isInPublishFlow && !isPublished && mode === 'view' && }
- {!isInPublishFlow && !isPublished && mode === 'edit' && }
- {isPublished && }
+
+ {mode === 'view' && }
+ {mode === 'view' && }
+ {mode === 'edit' && }
);
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx
index aacfba41c42..9fa81f5a1ec 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx
@@ -108,7 +108,7 @@ const nodeFieldOverlaySx: SystemStyleObject = {
},
};
-export const NodeFieldElementOverlay = memo(({ nodeId }: { nodeId: string }) => {
+const NodeFieldElementOverlay = memo(({ nodeId }: { nodeId: string }) => {
const mouseOverNode = useMouseOverNode(nodeId);
const mouseOverFormField = useMouseOverFormField(nodeId);
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx
index 0b38dd014c7..60b1dc66331 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx
@@ -44,9 +44,8 @@ const queryOptions = {
if (!currentData) {
return { serverWorkflowHash: null };
}
- const { is_published: _is_published, ...serverWorkflow } = currentData.workflow;
return {
- serverWorkflowHash: stableHash(serverWorkflow),
+ serverWorkflowHash: stableHash(currentData.workflow),
};
},
} satisfies Parameters[1];
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx
deleted file mode 100644
index 1f90716819b..00000000000
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx
+++ /dev/null
@@ -1,475 +0,0 @@
-import type { ButtonProps } from '@invoke-ai/ui-library';
-import {
- Button,
- ButtonGroup,
- Divider,
- Flex,
- ListItem,
- Spacer,
- Text,
- Tooltip,
- UnorderedList,
-} from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { logger } from 'app/logging/logger';
-import { $projectUrl } from 'app/store/nanostores/projectId';
-import { useAppSelector } from 'app/store/storeHooks';
-import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
-import { withResultAsync } from 'common/util/result';
-import { parseify } from 'common/util/serialize';
-import { ExternalLink } from 'features/gallery/components/ImageViewer/NoContentForViewer';
-import { InvocationNodeContextProvider } from 'features/nodes/components/flow/nodes/Invocation/context';
-import { NodeFieldElementOverlay } from 'features/nodes/components/sidePanel/builder/NodeFieldElementEditMode';
-import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
-import {
- $isInPublishFlow,
- $isPublishing,
- $isReadyToDoValidationRun,
- $isSelectingOutputNode,
- $outputNodeId,
- $validationRunData,
- selectHasUnpublishableNodes,
- usePublishInputs,
-} from 'features/nodes/components/sidePanel/workflow/publish';
-import { useInputFieldTemplateTitleOrThrow } from 'features/nodes/hooks/useInputFieldTemplateTitleOrThrow';
-import { useInputFieldUserTitleOrThrow } from 'features/nodes/hooks/useInputFieldUserTitleOrThrow';
-import { useMouseOverFormField } from 'features/nodes/hooks/useMouseOverNode';
-import { useNodeTemplateTitleOrThrow } from 'features/nodes/hooks/useNodeTemplateTitleOrThrow';
-import { useNodeUserTitleOrThrow } from 'features/nodes/hooks/useNodeUserTitleOrThrow';
-import { useOutputFieldNames } from 'features/nodes/hooks/useOutputFieldNames';
-import { useOutputFieldTemplate } from 'features/nodes/hooks/useOutputFieldTemplate';
-import { useZoomToNode } from 'features/nodes/hooks/useZoomToNode';
-import { useEnqueueWorkflows } from 'features/queue/hooks/useEnqueueWorkflows';
-import { $isReadyToEnqueue } from 'features/queue/store/readiness';
-import { selectAllowPublishWorkflows } from 'features/system/store/configSlice';
-import { toast } from 'features/toast/toast';
-import type { PropsWithChildren } from 'react';
-import { memo, useCallback, useMemo } from 'react';
-import { Trans, useTranslation } from 'react-i18next';
-import { PiArrowLineRightBold, PiLightningFill, PiXBold } from 'react-icons/pi';
-import { serializeError } from 'serialize-error';
-import { assert } from 'tsafe';
-
-const log = logger('generation');
-
-export const PublishWorkflowPanelContent = memo(() => {
- return (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- );
-});
-PublishWorkflowPanelContent.displayName = 'PublishWorkflowPanelContent';
-
-const OutputFields = memo(() => {
- const { t } = useTranslation();
- const outputNodeId = useStore($outputNodeId);
-
- return (
-
-
- {t('workflows.builder.publishedWorkflowOutputs')}
-
-
-
-
-
- {!outputNodeId && (
-
- {t('workflows.builder.noOutputNodeSelected')}
-
- )}
- {outputNodeId && (
-
-
-
- )}
-
- );
-});
-OutputFields.displayName = 'OutputFields';
-
-const OutputFieldsContent = memo(({ outputNodeId }: { outputNodeId: string }) => {
- const outputFieldNames = useOutputFieldNames();
-
- return (
- <>
- {outputFieldNames.map((fieldName) => (
-
- ))}
- >
- );
-});
-OutputFieldsContent.displayName = 'OutputFieldsContent';
-
-const PublishableInputFields = memo(() => {
- const { t } = useTranslation();
- const inputs = usePublishInputs();
-
- if (inputs.publishable.length === 0) {
- return (
-
-
- {t('workflows.builder.noPublishableInputs')}
-
-
- );
- }
-
- return (
-
- {t('workflows.builder.publishedWorkflowInputs')}
-
- {inputs.publishable.map(({ nodeId, fieldName }) => {
- return (
-
-
-
- );
- })}
-
- );
-});
-PublishableInputFields.displayName = 'PublishableInputFields';
-
-const UnpublishableInputFields = memo(() => {
- const { t } = useTranslation();
- const inputs = usePublishInputs();
-
- if (inputs.unpublishable.length === 0) {
- return null;
- }
-
- return (
-
-
- {t('workflows.builder.unpublishableInputs')}
-
-
- {inputs.unpublishable.map(({ nodeId, fieldName }) => {
- return (
-
-
-
- );
- })}
-
- );
-});
-UnpublishableInputFields.displayName = 'UnpublishableInputFields';
-
-const SelectOutputNodeButton = memo((props: ButtonProps) => {
- const { t } = useTranslation();
- const outputNodeId = useStore($outputNodeId);
- const isSelectingOutputNode = useStore($isSelectingOutputNode);
- const onClick = useCallback(() => {
- $outputNodeId.set(null);
- $isSelectingOutputNode.set(true);
- }, []);
- return (
- }
- isDisabled={isSelectingOutputNode}
- tooltip={isSelectingOutputNode ? t('workflows.builder.selectingOutputNodeDesc') : undefined}
- onClick={onClick}
- {...props}
- >
- {isSelectingOutputNode
- ? t('workflows.builder.selectingOutputNode')
- : outputNodeId
- ? t('workflows.builder.changeOutputNode')
- : t('workflows.builder.selectOutputNode')}
-
- );
-});
-SelectOutputNodeButton.displayName = 'SelectOutputNodeButton';
-
-const CancelPublishButton = memo(() => {
- const { t } = useTranslation();
- const isPublishing = useStore($isPublishing);
- const onClick = useCallback(() => {
- $isInPublishFlow.set(false);
- $isSelectingOutputNode.set(false);
- $outputNodeId.set(null);
- }, []);
- return (
- } onClick={onClick} isDisabled={isPublishing}>
- {t('common.cancel')}
-
- );
-});
-CancelPublishButton.displayName = 'CancelDeployButton';
-
-const PublishWorkflowButton = memo(() => {
- const { t } = useTranslation();
- const isPublishing = useStore($isPublishing);
- const isReadyToDoValidationRun = useStore($isReadyToDoValidationRun);
- const isReadyToEnqueue = useStore($isReadyToEnqueue);
- const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
- const hasUnpublishableNodes = useAppSelector(selectHasUnpublishableNodes);
- const outputNodeId = useStore($outputNodeId);
- const isSelectingOutputNode = useStore($isSelectingOutputNode);
- const inputs = usePublishInputs();
- const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows);
-
- const projectUrl = useStore($projectUrl);
-
- const enqueue = useEnqueueWorkflows();
- const onClick = useCallback(async () => {
- $isPublishing.set(true);
- const result = await withResultAsync(() => enqueue(true, true));
- if (result.isErr()) {
- toast({
- id: 'TOAST_PUBLISH_FAILED',
- status: 'error',
- title: t('workflows.builder.publishFailed'),
- description: t('workflows.builder.publishFailedDesc'),
- duration: null,
- });
- log.error({ error: serializeError(result.error) }, 'Failed to enqueue batch');
- } else {
- toast({
- id: 'TOAST_PUBLISH_SUCCESSFUL',
- status: 'success',
- title: t('workflows.builder.publishSuccess'),
- description: (
- ,
- }}
- />
- ),
- duration: null,
- });
- assert(result.value.enqueueResult.batch.batch_id);
- assert(result.value.batchConfig.validation_run_data);
- $validationRunData.set({
- batchId: result.value.enqueueResult.batch.batch_id,
- workflowId: result.value.batchConfig.validation_run_data.workflow_id,
- });
- log.debug(parseify(result.value), 'Enqueued batch');
- }
- $isPublishing.set(false);
- }, [enqueue, projectUrl, t]);
-
- const isDisabled = useMemo(() => {
- return (
- !allowPublishWorkflows ||
- !isReadyToEnqueue ||
- doesWorkflowHaveUnsavedChanges ||
- hasUnpublishableNodes ||
- !isReadyToDoValidationRun ||
- !(outputNodeId !== null && !isSelectingOutputNode) ||
- isPublishing
- );
- }, [
- allowPublishWorkflows,
- doesWorkflowHaveUnsavedChanges,
- hasUnpublishableNodes,
- isReadyToDoValidationRun,
- isReadyToEnqueue,
- isSelectingOutputNode,
- outputNodeId,
- isPublishing,
- ]);
-
- return (
- 0}
- hasUnpublishableInputs={inputs.unpublishable.length > 0}
- >
- } isDisabled={isDisabled} onClick={onClick}>
- {isPublishing ? t('workflows.builder.publishing') : t('workflows.builder.publish')}
-
-
- );
-});
-PublishWorkflowButton.displayName = 'DoValidationRunButton';
-
-const NodeInputFieldPreview = memo(({ nodeId, fieldName }: { nodeId: string; fieldName: string }) => {
- const mouseOverFormField = useMouseOverFormField(nodeId);
- const nodeUserTitle = useNodeUserTitleOrThrow();
- const nodeTemplateTitle = useNodeTemplateTitleOrThrow();
- const fieldUserTitle = useInputFieldUserTitleOrThrow(fieldName);
- const fieldTemplateTitle = useInputFieldTemplateTitleOrThrow(fieldName);
- const zoomToNode = useZoomToNode(nodeId);
-
- return (
-
- {`${nodeUserTitle || nodeTemplateTitle} -> ${fieldUserTitle || fieldTemplateTitle}`}
- {`${nodeId} -> ${fieldName}`}
-
-
- );
-});
-NodeInputFieldPreview.displayName = 'NodeInputFieldPreview';
-
-const NodeOutputFieldPreview = memo(({ nodeId, fieldName }: { nodeId: string; fieldName: string }) => {
- const mouseOverFormField = useMouseOverFormField(nodeId);
- const nodeUserTitle = useNodeUserTitleOrThrow();
- const nodeTemplateTitle = useNodeTemplateTitleOrThrow();
- const fieldTemplate = useOutputFieldTemplate(fieldName);
- const zoomToNode = useZoomToNode(nodeId);
-
- return (
-
- {`${nodeUserTitle || nodeTemplateTitle} -> ${fieldTemplate.title}`}
- {`${nodeId} -> ${fieldName}`}
-
-
- );
-});
-NodeOutputFieldPreview.displayName = 'NodeOutputFieldPreview';
-
-export const StartPublishFlowButton = memo(() => {
- const { t } = useTranslation();
- const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows);
- const isReadyToEnqueue = useStore($isReadyToEnqueue);
- const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
- const hasUnpublishableNodes = useAppSelector(selectHasUnpublishableNodes);
- const inputs = usePublishInputs();
-
- const onClick = useCallback(() => {
- $isInPublishFlow.set(true);
- }, []);
-
- const isDisabled = useMemo(() => {
- return !allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes;
- }, [allowPublishWorkflows, doesWorkflowHaveUnsavedChanges, hasUnpublishableNodes, isReadyToEnqueue]);
-
- return (
- 0}
- hasUnpublishableInputs={inputs.unpublishable.length > 0}
- >
- } variant="ghost" size="sm" isDisabled={isDisabled}>
- {t('workflows.builder.publish')}
-
-
- );
-});
-
-StartPublishFlowButton.displayName = 'StartPublishFlowButton';
-
-const PublishTooltip = memo(
- ({
- isWorkflowSaved,
- hasUnpublishableNodes,
- isReadyToEnqueue,
- hasOutputNode,
- hasPublishableInputs,
- hasUnpublishableInputs,
- children,
- }: PropsWithChildren<{
- isWorkflowSaved: boolean;
- hasUnpublishableNodes: boolean;
- isReadyToEnqueue: boolean;
- hasOutputNode: boolean;
- hasPublishableInputs: boolean;
- hasUnpublishableInputs: boolean;
- }>) => {
- const { t } = useTranslation();
- const warnings = useMemo(() => {
- const _warnings: string[] = [];
- if (!hasPublishableInputs) {
- _warnings.push(t('workflows.builder.warningWorkflowHasNoPublishableInputFields'));
- }
- if (hasUnpublishableInputs) {
- _warnings.push(t('workflows.builder.warningWorkflowHasUnpublishableInputFields'));
- }
- return _warnings;
- }, [hasPublishableInputs, hasUnpublishableInputs, t]);
- const errors = useMemo(() => {
- const _errors: string[] = [];
- if (!isWorkflowSaved) {
- _errors.push(t('workflows.builder.errorWorkflowHasUnsavedChanges'));
- }
- if (hasUnpublishableNodes) {
- _errors.push(t('workflows.builder.errorWorkflowHasUnpublishableNodes'));
- }
- if (!isReadyToEnqueue) {
- _errors.push(t('workflows.builder.errorWorkflowHasInvalidGraph'));
- }
- if (!hasOutputNode) {
- _errors.push(t('workflows.builder.errorWorkflowHasNoOutputNode'));
- }
- return _errors;
- }, [hasUnpublishableNodes, hasOutputNode, isReadyToEnqueue, isWorkflowSaved, t]);
-
- if (errors.length === 0 && warnings.length === 0) {
- return children;
- }
-
- return (
-
- {errors.length > 0 && (
- <>
-
- {t('workflows.builder.cannotPublish')}:
-
-
- {errors.map((problem, index) => (
- {problem}
- ))}
-
- >
- )}
- {warnings.length > 0 && (
- <>
-
- {t('workflows.builder.publishWarnings')}:
-
-
- {warnings.map((problem, index) => (
- {problem}
- ))}
-
- >
- )}
-
- }
- >
- {children}
-
- );
- }
-);
-PublishTooltip.displayName = 'PublishTooltip';
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx
deleted file mode 100644
index b88a877e3dd..00000000000
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx
+++ /dev/null
@@ -1,93 +0,0 @@
-import {
- Button,
- Flex,
- Heading,
- IconButton,
- Modal,
- ModalBody,
- ModalCloseButton,
- ModalContent,
- ModalFooter,
- ModalHeader,
- ModalOverlay,
- Text,
-} from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $projectUrl } from 'app/store/nanostores/projectId';
-import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
-import { useClipboard } from 'common/hooks/useClipboard';
-import { toast } from 'features/toast/toast';
-import { atom } from 'nanostores';
-import { useCallback, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiCopyBold } from 'react-icons/pi';
-import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types';
-
-const $workflowToShare = atom(null);
-const clearWorkflowToShare = () => $workflowToShare.set(null);
-
-export const useShareWorkflow = () => {
- const copyWorkflowLink = useCallback((workflow: WorkflowRecordListItemWithThumbnailDTO) => {
- $workflowToShare.set(workflow);
- }, []);
-
- return copyWorkflowLink;
-};
-
-export const ShareWorkflowModal = () => {
- useAssertSingleton('ShareWorkflowModal');
- const workflowToShare = useStore($workflowToShare);
- const projectUrl = useStore($projectUrl);
- const { t } = useTranslation();
- const clipboard = useClipboard();
- const workflowLink = useMemo(() => {
- if (!workflowToShare || !projectUrl) {
- return null;
- }
- return `${window.location.origin}${projectUrl}/studio?selectedWorkflowId=${workflowToShare.workflow_id}`;
- }, [projectUrl, workflowToShare]);
-
- const handleCopy = useCallback(() => {
- if (!workflowLink) {
- return;
- }
- clipboard.writeText(workflowLink, () => {
- toast({
- status: 'success',
- title: t('toast.linkCopied'),
- });
- });
- $workflowToShare.set(null);
- }, [workflowLink, clipboard, t]);
-
- return (
-
-
-
-
-
- {t('workflows.copyShareLinkForWorkflow')}
- {workflowToShare?.name}
-
-
-
-
-
- {workflowLink}
- }
- onClick={handleCopy}
- />
-
-
-
-
-
-
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx
deleted file mode 100644
index 4b08b72bf77..00000000000
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import { IconButton, Tooltip } from '@invoke-ai/ui-library';
-import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiLockBold } from 'react-icons/pi';
-
-export const LockedWorkflowIcon = memo(() => {
- const { t } = useTranslation();
-
- return (
-
- }
- />
-
- );
-});
-
-LockedWorkflowIcon.displayName = 'LockedWorkflowIcon';
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx
deleted file mode 100644
index 971e9eca78a..00000000000
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx
+++ /dev/null
@@ -1,35 +0,0 @@
-import { IconButton, Tooltip } from '@invoke-ai/ui-library';
-import { useShareWorkflow } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal';
-import type { MouseEvent } from 'react';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiShareFatBold } from 'react-icons/pi';
-import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types';
-
-export const ShareWorkflowButton = memo(({ workflow }: { workflow: WorkflowRecordListItemWithThumbnailDTO }) => {
- const shareWorkflow = useShareWorkflow();
- const { t } = useTranslation();
-
- const handleClickShare = useCallback(
- (e: MouseEvent) => {
- e.stopPropagation();
- shareWorkflow(workflow);
- },
- [shareWorkflow, workflow]
- );
-
- return (
-
- }
- />
-
- );
-});
-
-ShareWorkflowButton.displayName = 'ShareWorkflowButton';
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx
index 18dc640e90f..a86f3291e0b 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx
@@ -8,16 +8,11 @@ import {
ModalHeader,
ModalOverlay,
} from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { useWorkflowLibraryModal } from 'features/nodes/store/workflowLibraryModal';
-import {
- $workflowLibraryCategoriesOptions,
- selectWorkflowLibraryView,
- workflowLibraryViewChanged,
-} from 'features/nodes/store/workflowLibrarySlice';
-import { memo, useEffect, useMemo, useState } from 'react';
+import { selectWorkflowLibraryView, workflowLibraryViewChanged } from 'features/nodes/store/workflowLibrarySlice';
+import { memo, useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useGetCountsByCategoryQuery } from 'services/api/endpoints/workflows';
@@ -59,6 +54,26 @@ export const WorkflowLibraryModal = memo(() => {
});
WorkflowLibraryModal.displayName = 'WorkflowLibraryModal';
+const recentWorkflowsCountQueryArg = {
+ categories: ['user', 'default'],
+ has_been_opened: true,
+} satisfies Parameters[0];
+
+const yourWorkflowsCountQueryArg = {
+ categories: ['user'],
+} satisfies Parameters[0];
+
+const queryOptions = {
+ selectFromResult: ({ data, isLoading }) => {
+ if (!data) {
+ return { count: 0, isLoading: true };
+ }
+ return {
+ count: Object.values(data).reduce((acc, count) => acc + count, 0),
+ isLoading,
+ };
+ },
+} satisfies Parameters[1];
/**
* On first app load, if the user's selected view has no workflows, switches to the next available view.
@@ -66,38 +81,7 @@ WorkflowLibraryModal.displayName = 'WorkflowLibraryModal';
const useSyncInitialWorkflowLibraryCategories = () => {
const dispatch = useAppDispatch();
const view = useAppSelector(selectWorkflowLibraryView);
- const categoryOptions = useStore($workflowLibraryCategoriesOptions);
const [didSync, setDidSync] = useState(false);
- const recentWorkflowsCountQueryArg = useMemo(
- () =>
- ({
- categories: ['user', 'project', 'default'],
- has_been_opened: true,
- }) satisfies Parameters[0],
- []
- );
- const yourWorkflowsCountQueryArg = useMemo(
- () =>
- ({
- categories: ['user', 'project'],
- }) satisfies Parameters[0],
- []
- );
- const queryOptions = useMemo(
- () =>
- ({
- selectFromResult: ({ data, isLoading }) => {
- if (!data) {
- return { count: 0, isLoading: true };
- }
- return {
- count: Object.values(data).reduce((acc, count) => acc + count, 0),
- isLoading,
- };
- },
- }) satisfies Parameters[1],
- []
- );
const { count: recentWorkflowsCount, isLoading: isLoadingRecentWorkflowsCount } = useGetCountsByCategoryQuery(
recentWorkflowsCountQueryArg,
@@ -119,7 +103,7 @@ const useSyncInitialWorkflowLibraryCategories = () => {
} else {
dispatch(workflowLibraryViewChanged('defaults'));
}
- } else if (yourWorkflowsCount === 0 && (view === 'yours' || view === 'shared' || view === 'private')) {
+ } else if (yourWorkflowsCount === 0 && view === 'yours') {
if (recentWorkflowsCount > 0) {
dispatch(workflowLibraryViewChanged('recent'));
} else {
@@ -128,7 +112,6 @@ const useSyncInitialWorkflowLibraryCategories = () => {
}
setDidSync(true);
}, [
- categoryOptions,
didSync,
dispatch,
isLoadingRecentWorkflowsCount,
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx
index 604040d63a8..dedfdcc6599 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx
@@ -12,57 +12,35 @@ import {
Text,
Tooltip,
} from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { getOverlayScrollbarsParams, overlayScrollbarsStyles } from 'common/components/OverlayScrollbars/constants';
import type { WorkflowLibraryView, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import {
- $workflowLibraryCategoriesOptions,
- $workflowLibraryTagCategoriesOptions,
- $workflowLibraryTagOptions,
selectWorkflowLibrarySelectedTags,
selectWorkflowLibraryView,
+ WORKFLOW_LIBRARY_TAG_CATEGORIES,
+ WORKFLOW_LIBRARY_TAGS,
workflowLibraryTagsReset,
workflowLibraryTagToggled,
workflowLibraryViewChanged,
} from 'features/nodes/store/workflowLibrarySlice';
-import { selectAllowPublishWorkflows } from 'features/system/store/configSlice';
import { NewWorkflowButton } from 'features/workflowLibrary/components/NewWorkflowButton';
import { UploadWorkflowButton } from 'features/workflowLibrary/components/UploadWorkflowButton';
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import { PiArrowCounterClockwiseBold, PiStarFill, PiUsersBold } from 'react-icons/pi';
+import { PiArrowCounterClockwiseBold, PiStarFill } from 'react-icons/pi';
import { useDispatch } from 'react-redux';
import { useGetCountsByTagQuery } from 'services/api/endpoints/workflows';
export const WorkflowLibrarySideNav = () => {
const { t } = useTranslation();
- const categoryOptions = useStore($workflowLibraryCategoriesOptions);
- const view = useAppSelector(selectWorkflowLibraryView);
- const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows);
return (
{t('workflows.recentlyOpened')}
{t('workflows.yourWorkflows')}
- {categoryOptions.includes('project') && (
-
-
-
- {t('workflows.private')}
-
- } view="shared">
- {t('workflows.shared')}
-
-
-
-
- )}
- {allowPublishWorkflows && (
- {t('workflows.published')}
- )}
@@ -112,7 +90,6 @@ BrowseWorkflowsButton.displayName = 'BrowseWorkflowsButton';
const overlayscrollbarsOptions = getOverlayScrollbarsParams({ visibility: 'visible' }).options;
const DefaultsViewCheckboxesCollapsible = memo(() => {
- const tagCategoryOptions = useStore($workflowLibraryTagCategoriesOptions);
const view = useAppSelector(selectWorkflowLibraryView);
return (
@@ -120,7 +97,7 @@ const DefaultsViewCheckboxesCollapsible = memo(() => {
- {tagCategoryOptions.map((tagCategory) => (
+ {WORKFLOW_LIBRARY_TAG_CATEGORIES.map((tagCategory) => (
))}
@@ -131,16 +108,12 @@ const DefaultsViewCheckboxesCollapsible = memo(() => {
});
DefaultsViewCheckboxesCollapsible.displayName = 'DefaultsViewCheckboxes';
+const tagCountQueryArg = {
+ tags: WORKFLOW_LIBRARY_TAGS.map((tag) => tag.label),
+ categories: ['default'],
+} satisfies Parameters[0];
+
const useCountForIndividualTag = (tag: string) => {
- const allTags = useStore($workflowLibraryTagOptions);
- const queryArg = useMemo(
- () =>
- ({
- tags: allTags.map((tag) => tag.label),
- categories: ['default'],
- }) satisfies Parameters[0],
- [allTags]
- );
const queryOptions = useMemo(
() =>
({
@@ -151,21 +124,12 @@ const useCountForIndividualTag = (tag: string) => {
[tag]
);
- const { count } = useGetCountsByTagQuery(queryArg, queryOptions);
+ const { count } = useGetCountsByTagQuery(tagCountQueryArg, queryOptions);
return count;
};
const useCountForTagCategory = (tagCategory: WorkflowTagCategory) => {
- const allTags = useStore($workflowLibraryTagOptions);
- const queryArg = useMemo(
- () =>
- ({
- tags: allTags.map((tag) => tag.label),
- categories: ['default'], // We only allow filtering by tag for default workflows
- }) satisfies Parameters[0],
- [allTags]
- );
const queryOptions = useMemo(
() =>
({
@@ -181,7 +145,7 @@ const useCountForTagCategory = (tagCategory: WorkflowTagCategory) => {
[tagCategory]
);
- const { count } = useGetCountsByTagQuery(queryArg, queryOptions);
+ const { count } = useGetCountsByTagQuery(tagCountQueryArg, queryOptions);
return count;
};
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx
index 61802b37fb7..203a1f7a319 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx
@@ -29,15 +29,9 @@ const getCategories = (view: WorkflowLibraryView): WorkflowCategory[] => {
case 'defaults':
return ['default'];
case 'recent':
- return ['user', 'project', 'default'];
+ return ['user', 'default'];
case 'yours':
- return ['user', 'project'];
- case 'private':
return ['user'];
- case 'shared':
- return ['project'];
- case 'published':
- return ['user', 'project', 'default'];
default:
assert>(false);
}
@@ -68,7 +62,6 @@ const useInfiniteQueryAry = () => {
query: debouncedSearchTerm,
tags: view === 'defaults' ? selectedTags : [],
has_been_opened: getHasBeenOpened(view),
- is_published: view === 'published' ? true : undefined,
} satisfies Parameters[0];
}, [orderBy, direction, view, debouncedSearchTerm, selectedTags]);
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx
index 93b8cc1c12f..8ea1bbf511f 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx
@@ -1,15 +1,13 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Badge, Flex, Icon, Image, Spacer, Text } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { LockedWorkflowIcon } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon';
-import { ShareWorkflowButton } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow';
import { selectWorkflowId } from 'features/nodes/store/selectors';
import { workflowModeChanged } from 'features/nodes/store/workflowLibrarySlice';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import InvokeLogo from 'public/assets/images/invoke-symbol-wht-lrg.svg';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import { PiImage, PiUsersBold } from 'react-icons/pi';
+import { PiImage } from 'react-icons/pi';
import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types';
import { DeleteWorkflow } from './WorkflowLibraryListItemActions/DeleteWorkflow';
@@ -82,7 +80,7 @@ export const WorkflowListItem = memo(({ workflow }: { workflow: WorkflowRecordLi
{workflow.name}
- {isActive && !workflow.is_published && (
+ {isActive && (
)}
- {workflow.is_published && (
-
- {t('workflows.builder.published')}
-
- )}
- {workflow.category === 'project' && }
{workflow.category === 'default' && (
)}
- {workflow.category === 'default' && !workflow.is_published && (
-
- )}
- {workflow.category !== 'default' && !workflow.is_published && (
+ {workflow.category === 'default' && }
+ {workflow.category !== 'default' && (
<>
>
)}
- {workflow.category === 'project' && }
- {workflow.is_published && }
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowSortControl.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowSortControl.tsx
index 8f58e2a7c0f..019af3bef59 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowSortControl.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowSortControl.tsx
@@ -1,15 +1,14 @@
import { Flex, FormControl, FormLabel, Select } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
- $workflowLibrarySortOptions,
selectWorkflowLibraryDirection,
selectWorkflowLibraryOrderBy,
+ WORKFLOW_LIBRARY_SORT_OPTIONS,
workflowLibraryDirectionChanged,
workflowLibraryOrderByChanged,
} from 'features/nodes/store/workflowLibrarySlice';
import type { ChangeEvent } from 'react';
-import { useCallback, useEffect, useMemo } from 'react';
+import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { z } from 'zod';
@@ -26,7 +25,6 @@ export const WorkflowSortControl = () => {
const orderBy = useAppSelector(selectWorkflowLibraryOrderBy);
const direction = useAppSelector(selectWorkflowLibraryDirection);
- const sortOptions = useStore($workflowLibrarySortOptions);
const ORDER_BY_LABELS = useMemo(
() => ({
@@ -68,19 +66,12 @@ export const WorkflowSortControl = () => {
[dispatch]
);
- useEffect(() => {
- if (!sortOptions.includes('opened_at')) {
- dispatch(workflowLibraryOrderByChanged('name'));
- dispatch(workflowLibraryDirectionChanged('ASC'));
- }
- }, [sortOptions, dispatch]);
-
return (
{t('common.orderBy')}
-
diff --git a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx
index 1d5eacc669c..10cc8b0d984 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx
@@ -3,13 +3,19 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectCLIPSkip, selectModel, setClipSkip } from 'features/controlLayers/store/paramsSlice';
import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
-import { selectCLIPSkipConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 0,
+ sliderMin: 0,
+ numberInputMin: 0,
+ fineStep: 1,
+ coarseStep: 1,
+};
+
const ParamClipSkip = () => {
const clipSkip = useAppSelector(selectCLIPSkip);
- const config = useAppSelector(selectCLIPSkipConfig);
const model = useAppSelector(selectModel);
const dispatch = useAppDispatch();
@@ -47,21 +53,21 @@ const ParamClipSkip = () => {
diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx
index 40145839085..a237896c676 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx
@@ -3,22 +3,10 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { bboxAspectRatioIdChanged } from 'features/controlLayers/store/canvasSlice';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
-import {
- selectIsChatGPT4o,
- selectIsFluxKontext,
- selectIsImagen3,
- selectIsImagen4,
-} from 'features/controlLayers/store/paramsSlice';
import { selectAspectRatioID } from 'features/controlLayers/store/selectors';
-import {
- isAspectRatioID,
- zAspectRatioID,
- zChatGPT4oAspectRatioID,
- zFluxKontextAspectRatioID,
- zImagen3AspectRatioID,
-} from 'features/controlLayers/store/types';
+import { isAspectRatioID, zAspectRatioID } from 'features/controlLayers/store/types';
import type { ChangeEventHandler } from 'react';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretDownBold } from 'react-icons/pi';
@@ -27,24 +15,6 @@ export const BboxAspectRatioSelect = memo(() => {
const dispatch = useAppDispatch();
const id = useAppSelector(selectAspectRatioID);
const isStaging = useCanvasIsStaging();
- const isImagen3 = useAppSelector(selectIsImagen3);
- const isChatGPT4o = useAppSelector(selectIsChatGPT4o);
- const isImagen4 = useAppSelector(selectIsImagen4);
- const isFluxKontext = useAppSelector(selectIsFluxKontext);
- const options = useMemo(() => {
- // Imagen3 and ChatGPT4o have different aspect ratio options, and do not support freeform sizes
- if (isImagen3 || isImagen4) {
- return zImagen3AspectRatioID.options;
- }
- if (isChatGPT4o) {
- return zChatGPT4oAspectRatioID.options;
- }
- if (isFluxKontext) {
- return zFluxKontextAspectRatioID.options;
- }
- // All other models
- return zAspectRatioID.options;
- }, [isImagen3, isChatGPT4o, isImagen4, isFluxKontext]);
const onChange = useCallback>(
(e) => {
@@ -62,7 +32,7 @@ export const BboxAspectRatioSelect = memo(() => {
{t('parameters.aspect')}
}>
- {options.map((ratio) => (
+ {zAspectRatioID.options.map((ratio) => (
diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx
index dd8e319447d..cfaee3d0c95 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx
@@ -4,16 +4,24 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf
import { bboxHeightChanged } from 'features/controlLayers/store/canvasSlice';
import { selectGridSize, selectHeight, selectOptimalDimension } from 'features/controlLayers/store/selectors';
import { useIsBboxSizeLocked } from 'features/parameters/components/Bbox/use-is-bbox-size-locked';
-import { selectHeightConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 512,
+ sliderMin: 64,
+ sliderMax: 1536,
+ numberInputMin: 64,
+ numberInputMax: 4096,
+ fineStep: 8,
+ coarseStep: 64,
+};
+
export const BboxHeight = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const optimalDimension = useAppSelector(selectOptimalDimension);
const height = useAppSelector(selectHeight);
- const config = useAppSelector(selectHeightConfig);
const isBboxSizeLocked = useIsBboxSizeLocked();
const gridSize = useAppSelector(selectGridSize);
@@ -24,10 +32,7 @@ export const BboxHeight = memo(() => {
[dispatch]
);
- const marks = useMemo(
- () => [config.sliderMin, optimalDimension, config.sliderMax],
- [config.sliderMin, config.sliderMax, optimalDimension]
- );
+ const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]);
return (
@@ -38,9 +43,9 @@ export const BboxHeight = memo(() => {
value={height}
defaultValue={optimalDimension}
onChange={onChange}
- min={config.sliderMin}
- max={config.sliderMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.sliderMin}
+ max={CONSTRAINTS.sliderMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
marks={marks}
/>
@@ -48,9 +53,9 @@ export const BboxHeight = memo(() => {
value={height}
defaultValue={optimalDimension}
onChange={onChange}
- min={config.numberInputMin}
- max={config.numberInputMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.numberInputMin}
+ max={CONSTRAINTS.numberInputMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
/>
diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx
index da7338e72e3..db9b53f6ece 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx
@@ -4,16 +4,21 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { bboxScaledHeightChanged } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectGridSize, selectOptimalDimension } from 'features/controlLayers/store/selectors';
import { useIsBboxSizeLocked } from 'features/parameters/components/Bbox/use-is-bbox-size-locked';
-import { selectConfigSlice } from 'features/system/store/configSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const selectIsManual = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.scaleMethod === 'manual');
const selectScaledHeight = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.scaledSize.height);
-const selectScaledBoundingBoxHeightConfig = createSelector(
- selectConfigSlice,
- (config) => config.sd.scaledBoundingBoxHeight
-);
+
+const CONSTRAINTS = {
+ initial: 512,
+ sliderMin: 64,
+ sliderMax: 1536,
+ numberInputMin: 64,
+ numberInputMax: 4096,
+ fineStep: 8,
+ coarseStep: 64,
+};
const BboxScaledHeight = () => {
const { t } = useTranslation();
@@ -22,7 +27,6 @@ const BboxScaledHeight = () => {
const optimalDimension = useAppSelector(selectOptimalDimension);
const isManual = useAppSelector(selectIsManual);
const scaledHeight = useAppSelector(selectScaledHeight);
- const config = useAppSelector(selectScaledBoundingBoxHeightConfig);
const gridSize = useAppSelector(selectGridSize);
const onChange = useCallback(
@@ -36,9 +40,9 @@ const BboxScaledHeight = () => {
{t('parameters.scaledHeight')}
{
defaultValue={optimalDimension}
/>
canvas.bbox.scaleMethod === 'manual');
const selectScaledWidth = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.scaledSize.width);
-const selectScaledBoundingBoxWidthConfig = createSelector(
- selectConfigSlice,
- (config) => config.sd.scaledBoundingBoxWidth
-);
+
+const CONSTRAINTS = {
+ initial: 512,
+ sliderMin: 64,
+ sliderMax: 1536,
+ numberInputMin: 64,
+ numberInputMax: 4096,
+ fineStep: 8,
+ coarseStep: 64,
+};
const BboxScaledWidth = () => {
const { t } = useTranslation();
@@ -22,7 +27,6 @@ const BboxScaledWidth = () => {
const optimalDimension = useAppSelector(selectOptimalDimension);
const isManual = useAppSelector(selectIsManual);
const scaledWidth = useAppSelector(selectScaledWidth);
- const config = useAppSelector(selectScaledBoundingBoxWidthConfig);
const gridSize = useAppSelector(selectGridSize);
const onChange = useCallback(
@@ -36,9 +40,9 @@ const BboxScaledWidth = () => {
{t('parameters.scaledWidth')}
{
marks
/>
{
- const supportsAspectRatio = useAppSelector(selectModelSupportsAspectRatio);
- const supportsPixelDimensions = useAppSelector(selectModelSupportsPixelDimensions);
-
- if (!supportsAspectRatio) {
- return null;
- }
-
return (
@@ -30,20 +17,11 @@ export const BboxSettings = memo(() => {
- {supportsPixelDimensions && (
- <>
-
-
- >
- )}
+
+
- {supportsPixelDimensions && (
- <>
-
-
- >
- )}
- {!supportsPixelDimensions && }
+
+
diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx
index 8ad457da7ac..740df6cf218 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx
@@ -4,16 +4,24 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf
import { bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
import { selectGridSize, selectOptimalDimension, selectWidth } from 'features/controlLayers/store/selectors';
import { useIsBboxSizeLocked } from 'features/parameters/components/Bbox/use-is-bbox-size-locked';
-import { selectWidthConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 512,
+ sliderMin: 64,
+ sliderMax: 1536,
+ numberInputMin: 64,
+ numberInputMax: 4096,
+ fineStep: 8,
+ coarseStep: 64,
+};
+
export const BboxWidth = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const width = useAppSelector(selectWidth);
const optimalDimension = useAppSelector(selectOptimalDimension);
- const config = useAppSelector(selectWidthConfig);
const isBboxSizeLocked = useIsBboxSizeLocked();
const gridSize = useAppSelector(selectGridSize);
@@ -24,10 +32,7 @@ export const BboxWidth = memo(() => {
[dispatch]
);
- const marks = useMemo(
- () => [config.sliderMin, optimalDimension, config.sliderMax],
- [config.sliderMax, config.sliderMin, optimalDimension]
- );
+ const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]);
return (
@@ -38,9 +43,9 @@ export const BboxWidth = memo(() => {
value={width}
onChange={onChange}
defaultValue={optimalDimension}
- min={config.sliderMin}
- max={config.sliderMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.sliderMin}
+ max={CONSTRAINTS.sliderMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
marks={marks}
/>
@@ -48,9 +53,9 @@ export const BboxWidth = memo(() => {
value={width}
onChange={onChange}
defaultValue={optimalDimension}
- min={config.numberInputMin}
- max={config.numberInputMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.numberInputMin}
+ max={CONSTRAINTS.numberInputMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
/>
diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts b/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts
index 57b55d8a21e..eaf13811088 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts
+++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts
@@ -1,9 +1,6 @@
-import { useAppSelector } from 'app/store/storeHooks';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
-import { selectIsApiBaseModel } from 'features/controlLayers/store/paramsSlice';
export const useIsBboxSizeLocked = () => {
const isStaging = useCanvasIsStaging();
- const isApiModel = useAppSelector(selectIsApiBaseModel);
- return isApiModel || isStaging;
+ return isStaging;
};
diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx
index 007b2b04887..ed830413967 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx
@@ -2,14 +2,22 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectCanvasCoherenceEdgeSize, setCanvasCoherenceEdgeSize } from 'features/controlLayers/store/paramsSlice';
-import { selectCanvasCoherenceEdgeSizeConfig } from 'features/system/store/configSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 16,
+ sliderMin: 0,
+ sliderMax: 128,
+ numberInputMin: 0,
+ numberInputMax: 1024,
+ fineStep: 8,
+ coarseStep: 16,
+};
+
const ParamCanvasCoherenceEdgeSize = () => {
const dispatch = useAppDispatch();
const canvasCoherenceEdgeSize = useAppSelector(selectCanvasCoherenceEdgeSize);
- const config = useAppSelector(selectCanvasCoherenceEdgeSizeConfig);
const { t } = useTranslation();
@@ -26,22 +34,22 @@ const ParamCanvasCoherenceEdgeSize = () => {
{t('parameters.coherenceEdgeSize')}
diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx
index a165388fdcd..082e9ee8097 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx
@@ -2,15 +2,23 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectMaskBlur, setMaskBlur } from 'features/controlLayers/store/paramsSlice';
-import { selectMaskBlurConfig } from 'features/system/store/configSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 16,
+ sliderMin: 0,
+ sliderMax: 128,
+ numberInputMin: 0,
+ numberInputMax: 512,
+ fineStep: 1,
+ coarseStep: 1,
+};
+
const ParamMaskBlur = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const maskBlur = useAppSelector(selectMaskBlur);
- const config = useAppSelector(selectMaskBlurConfig);
const handleChange = useCallback(
(v: number) => {
@@ -27,21 +35,21 @@ const ParamMaskBlur = () => {
);
diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx
index 2ae24fdb805..5b795aeaddd 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx
@@ -1,34 +1,39 @@
-import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
+import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
+import { EMPTY_ARRAY } from 'app/store/constants';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectInfillMethod, setInfillMethod } from 'features/controlLayers/store/paramsSlice';
+import { zInfillMethod } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import { useGetAppConfigQuery } from 'services/api/endpoints/appInfo';
+import { useGetPatchmatchStatusQuery } from 'services/api/endpoints/appInfo';
const ParamInfillMethod = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const infillMethod = useAppSelector(selectInfillMethod);
- const { data: appConfigData } = useGetAppConfigQuery();
- const options = useMemo(
- () =>
- appConfigData
- ? appConfigData.infill_methods.map((method) => ({
- label: method,
- value: method,
- }))
- : [],
- [appConfigData]
- );
+ const { options } = useGetPatchmatchStatusQuery(undefined, {
+ selectFromResult: ({ data: isPatchmatchAvailable }) => {
+ if (isPatchmatchAvailable === undefined) {
+ // loading...
+ return { options: EMPTY_ARRAY };
+ }
+ if (isPatchmatchAvailable) {
+ return { options: zInfillMethod.options.map((o) => ({ label: o, value: o })) };
+ }
+ return {
+ options: zInfillMethod.options.filter((o) => o !== 'patchmatch').map((o) => ({ label: o, value: o })),
+ };
+ },
+ });
const onChange = useCallback(
(v) => {
if (!v || !options.find((o) => o.value === v.value)) {
return;
}
- dispatch(setInfillMethod(v.value));
+ dispatch(setInfillMethod(zInfillMethod.parse(v.value)));
},
[dispatch, options]
);
diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx
index f2998b9f84b..5b50bdbacd3 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx
@@ -6,15 +6,23 @@ import {
selectInfillPatchmatchDownscaleSize,
setInfillPatchmatchDownscaleSize,
} from 'features/controlLayers/store/paramsSlice';
-import { selectInfillPatchmatchDownscaleSizeConfig } from 'features/system/store/configSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 1,
+ sliderMin: 1,
+ sliderMax: 10,
+ numberInputMin: 1,
+ numberInputMax: 10,
+ fineStep: 1,
+ coarseStep: 1,
+};
+
const ParamInfillPatchmatchDownscaleSize = () => {
const dispatch = useAppDispatch();
const infillMethod = useAppSelector(selectInfillMethod);
const infillPatchmatchDownscaleSize = useAppSelector(selectInfillPatchmatchDownscaleSize);
- const config = useAppSelector(selectInfillPatchmatchDownscaleSizeConfig);
const { t } = useTranslation();
@@ -34,20 +42,20 @@ const ParamInfillPatchmatchDownscaleSize = () => {
value={infillPatchmatchDownscaleSize}
onChange={handleChange}
marks
- defaultValue={config.initial}
- min={config.sliderMin}
- max={config.sliderMax}
- step={config.coarseStep}
- fineStep={config.fineStep}
+ defaultValue={CONSTRAINTS.initial}
+ min={CONSTRAINTS.sliderMin}
+ max={CONSTRAINTS.sliderMax}
+ step={CONSTRAINTS.coarseStep}
+ fineStep={CONSTRAINTS.fineStep}
/>
);
diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx
index 3df4b3e9282..dde61cc6271 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx
@@ -1,14 +1,22 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectInfillMethod, selectInfillTileSize, setInfillTileSize } from 'features/controlLayers/store/paramsSlice';
-import { selectInfillTileSizeConfig } from 'features/system/store/configSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 32,
+ sliderMin: 16,
+ sliderMax: 64,
+ numberInputMin: 16,
+ numberInputMax: 256,
+ fineStep: 1,
+ coarseStep: 1,
+};
+
const ParamInfillTileSize = () => {
const dispatch = useAppDispatch();
const infillTileSize = useAppSelector(selectInfillTileSize);
- const config = useAppSelector(selectInfillTileSizeConfig);
const infillMethod = useAppSelector(selectInfillMethod);
const { t } = useTranslation();
@@ -26,21 +34,21 @@ const ParamInfillTileSize = () => {
);
diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx
index 145ca6f2da7..c0e7a3c2a72 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx
@@ -2,19 +2,25 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectCFGScale, setCfgScale } from 'features/controlLayers/store/paramsSlice';
-import { selectCFGScaleConfig } from 'features/system/store/configSlice';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+export const CONSTRAINTS = {
+ initial: 7,
+ sliderMin: 1,
+ sliderMax: 20,
+ numberInputMin: 1,
+ numberInputMax: 200,
+ fineStep: 0.1,
+ coarseStep: 0.5,
+};
+
+export const MARKS = [CONSTRAINTS.sliderMin, Math.floor(CONSTRAINTS.sliderMax / 2), CONSTRAINTS.sliderMax];
+
const ParamCFGScale = () => {
const cfgScale = useAppSelector(selectCFGScale);
- const config = useAppSelector(selectCFGScaleConfig);
const dispatch = useAppDispatch();
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback((v: number) => dispatch(setCfgScale(v)), [dispatch]);
return (
@@ -24,21 +30,21 @@ const ParamCFGScale = () => {
diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx
index 86740e0846d..290f170b6ce 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx
@@ -2,23 +2,29 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectGuidance, setGuidance } from 'features/controlLayers/store/paramsSlice';
-import { selectGuidanceConfig } from 'features/system/store/configSlice';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+export const CONSTRAINTS = {
+ initial: 4,
+ sliderMin: 2,
+ sliderMax: 6,
+ numberInputMin: 1,
+ numberInputMax: 20,
+ fineStep: 0.1,
+ coarseStep: 0.5,
+};
+
+export const MARKS = [
+ CONSTRAINTS.sliderMin,
+ Math.floor(CONSTRAINTS.sliderMax - (CONSTRAINTS.sliderMax - CONSTRAINTS.sliderMin) / 2),
+ CONSTRAINTS.sliderMax,
+];
+
const ParamGuidance = () => {
const guidance = useAppSelector(selectGuidance);
- const config = useAppSelector(selectGuidanceConfig);
const dispatch = useAppDispatch();
const { t } = useTranslation();
- const marks = useMemo(
- () => [
- config.sliderMin,
- Math.floor(config.sliderMax - (config.sliderMax - config.sliderMin) / 2),
- config.sliderMax,
- ],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback((v: number) => dispatch(setGuidance(v)), [dispatch]);
return (
@@ -28,21 +34,21 @@ const ParamGuidance = () => {
diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx
index 8001d81c9f2..73d22f0eac7 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx
@@ -1,5 +1,4 @@
import { Box, Flex, Textarea } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import {
@@ -8,17 +7,12 @@ import {
selectPositivePrompt,
selectPositivePromptHistory,
} from 'features/controlLayers/store/paramsSlice';
-import { promptGenerationFromImageDndTarget } from 'features/dnd/dnd';
-import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { ShowDynamicPromptsPreviewButton } from 'features/dynamicPrompts/components/ShowDynamicPromptsPreviewButton';
import { NegativePromptToggleButton } from 'features/parameters/components/Core/NegativePromptToggleButton';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { ViewModePrompt } from 'features/parameters/components/Prompts/ViewModePrompt';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
-import { PromptExpansionMenu } from 'features/prompt/PromptExpansion/PromptExpansionMenu';
-import { PromptExpansionOverlay } from 'features/prompt/PromptExpansion/PromptExpansionOverlay';
-import { promptExpansionApi } from 'features/prompt/PromptExpansion/state';
import { PromptPopover } from 'features/prompt/PromptPopover';
import { usePrompt } from 'features/prompt/usePrompt';
import {
@@ -26,9 +20,7 @@ import {
selectStylePresetViewMode,
} from 'features/stylePresets/store/stylePresetSlice';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
-import { selectAllowPromptExpansion } from 'features/system/store/configSlice';
-import { selectActiveTab } from 'features/ui/store/uiSelectors';
-import React, { memo, useCallback, useMemo, useRef } from 'react';
+import React, { memo, useCallback, useRef } from 'react';
import type { HotkeyCallback } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { useClickAway } from 'react-use';
@@ -120,9 +112,6 @@ export const ParamPositivePrompt = memo(() => {
const viewMode = useAppSelector(selectStylePresetViewMode);
const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);
const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt);
- const { isPending: isPromptExpansionPending } = useStore(promptExpansionApi.$state);
- const isPromptExpansionEnabled = useAppSelector(selectAllowPromptExpansion);
- const activeTab = useAppSelector(selectActiveTab);
const promptHistoryApi = usePromptHistory();
@@ -153,7 +142,6 @@ export const ParamPositivePrompt = memo(() => {
prompt,
textareaRef: textareaRef,
onChange: handleChange,
- isDisabled: isPromptExpansionPending,
});
// When the user clicks away from the textarea, reset the prompt history state.
@@ -204,8 +192,6 @@ export const ParamPositivePrompt = memo(() => {
dependencies: [promptHistoryApi.next, isPromptFocused],
});
- const dndTargetData = useMemo(() => promptGenerationFromImageDndTarget.getData(), []);
-
return (
@@ -224,17 +210,15 @@ export const ParamPositivePrompt = memo(() => {
paddingTop={0}
paddingBottom={3}
resize="vertical"
- minH={isPromptExpansionEnabled ? 44 : 32}
- isDisabled={isPromptExpansionPending}
+ minH={32}
/>
- {activeTab !== 'video' && modelSupportsNegativePrompt && }
+ {modelSupportsNegativePrompt && }
- {isPromptExpansionEnabled && }
{viewMode && (
@@ -244,15 +228,6 @@ export const ParamPositivePrompt = memo(() => {
label={`${t('parameters.positivePromptPlaceholder')} (${t('stylePresets.preview')})`}
/>
)}
- {isPromptExpansionEnabled && (
-
- )}
-
diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx
index f7ef4660b58..31efe5d0a6f 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx
@@ -2,19 +2,25 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectSteps, setSteps } from 'features/controlLayers/store/paramsSlice';
-import { selectStepsConfig } from 'features/system/store/configSlice';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+export const CONSTRAINTS = {
+ initial: 30,
+ sliderMin: 1,
+ sliderMax: 100,
+ numberInputMin: 1,
+ numberInputMax: 500,
+ fineStep: 1,
+ coarseStep: 1,
+};
+
+export const MARKS = [CONSTRAINTS.sliderMin, Math.floor(CONSTRAINTS.sliderMax / 2), CONSTRAINTS.sliderMax];
+
const ParamSteps = () => {
const steps = useAppSelector(selectSteps);
- const config = useAppSelector(selectStepsConfig);
const dispatch = useAppDispatch();
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback(
(v: number) => {
dispatch(setSteps(v));
@@ -29,21 +35,21 @@ const ParamSteps = () => {
diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx
index 9bcac785b36..05653c6295e 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx
@@ -1,11 +1,5 @@
import type { FormLabelProps } from '@invoke-ai/ui-library';
import { Flex, FormControlGroup } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import {
- selectModelSupportsAspectRatio,
- selectModelSupportsPixelDimensions,
-} from 'features/controlLayers/store/paramsSlice';
-import { PixelDimensionsUnsupportedAlert } from 'features/parameters/components/PixelDimensionsUnsupportedAlert';
import { memo } from 'react';
import { DimensionsAspectRatioSelect } from './DimensionsAspectRatioSelect';
@@ -17,13 +11,6 @@ import { DimensionsSwapButton } from './DimensionsSwapButton';
import { DimensionsWidth } from './DimensionsWidth';
export const Dimensions = memo(() => {
- const supportsAspectRatio = useAppSelector(selectModelSupportsAspectRatio);
- const supportsPixelDimensions = useAppSelector(selectModelSupportsPixelDimensions);
-
- if (!supportsAspectRatio) {
- return null;
- }
-
return (
@@ -31,20 +18,11 @@ export const Dimensions = memo(() => {
- {supportsPixelDimensions && (
- <>
-
-
- >
- )}
+
+
- {supportsPixelDimensions && (
- <>
-
-
- >
- )}
- {!supportsPixelDimensions && }
+
+
diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx
index bd0c0d03a6b..4d3edc6e4bd 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx
@@ -1,25 +1,10 @@
import { FormControl, FormLabel, Select } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
-import {
- aspectRatioIdChanged,
- selectAspectRatioID,
- selectIsChatGPT4o,
- selectIsFluxKontext,
- selectIsGemini2_5,
- selectIsImagen3,
- selectIsImagen4,
-} from 'features/controlLayers/store/paramsSlice';
-import {
- isAspectRatioID,
- zAspectRatioID,
- zChatGPT4oAspectRatioID,
- zFluxKontextAspectRatioID,
- zGemini2_5AspectRatioID,
- zImagen3AspectRatioID,
-} from 'features/controlLayers/store/types';
+import { aspectRatioIdChanged, selectAspectRatioID } from 'features/controlLayers/store/paramsSlice';
+import { isAspectRatioID, zAspectRatioID } from 'features/controlLayers/store/types';
import type { ChangeEventHandler } from 'react';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretDownBold } from 'react-icons/pi';
@@ -27,29 +12,6 @@ export const DimensionsAspectRatioSelect = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const id = useAppSelector(selectAspectRatioID);
- const isImagen3 = useAppSelector(selectIsImagen3);
- const isChatGPT4o = useAppSelector(selectIsChatGPT4o);
- const isImagen4 = useAppSelector(selectIsImagen4);
- const isFluxKontext = useAppSelector(selectIsFluxKontext);
- const isGemini2_5 = useAppSelector(selectIsGemini2_5);
-
- const options = useMemo(() => {
- // Imagen3 and ChatGPT4o have different aspect ratio options, and do not support freeform sizes
- if (isImagen3 || isImagen4) {
- return zImagen3AspectRatioID.options;
- }
- if (isChatGPT4o) {
- return zChatGPT4oAspectRatioID.options;
- }
- if (isFluxKontext) {
- return zFluxKontextAspectRatioID.options;
- }
- if (isGemini2_5) {
- return zGemini2_5AspectRatioID.options;
- }
- // All other models
- return zAspectRatioID.options;
- }, [isImagen3, isChatGPT4o, isImagen4, isFluxKontext, isGemini2_5]);
const onChange = useCallback>(
(e) => {
@@ -67,7 +29,7 @@ export const DimensionsAspectRatioSelect = memo(() => {
{t('parameters.aspect')}
}>
- {options.map((ratio) => (
+ {zAspectRatioID.options.map((ratio) => (
diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx
index a2f84f360a0..924187c1ed0 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx
@@ -1,20 +1,27 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
-import { heightChanged, selectHeight, selectIsApiBaseModel } from 'features/controlLayers/store/paramsSlice';
+import { heightChanged, selectHeight } from 'features/controlLayers/store/paramsSlice';
import { selectGridSize, selectOptimalDimension } from 'features/controlLayers/store/selectors';
-import { selectHeightConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
+export const CONSTRAINTS = {
+ initial: 512,
+ sliderMin: 64,
+ sliderMax: 1536,
+ numberInputMin: 64,
+ numberInputMax: 4096,
+ fineStep: 8,
+ coarseStep: 64,
+};
+
export const DimensionsHeight = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const optimalDimension = useAppSelector(selectOptimalDimension);
const height = useAppSelector(selectHeight);
- const config = useAppSelector(selectHeightConfig);
const gridSize = useAppSelector(selectGridSize);
- const isApiModel = useAppSelector(selectIsApiBaseModel);
const onChange = useCallback(
(v: number) => {
@@ -23,13 +30,10 @@ export const DimensionsHeight = memo(() => {
[dispatch]
);
- const marks = useMemo(
- () => [config.sliderMin, optimalDimension, config.sliderMax],
- [config.sliderMin, config.sliderMax, optimalDimension]
- );
+ const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]);
return (
-
+
{t('parameters.height')}
@@ -37,9 +41,9 @@ export const DimensionsHeight = memo(() => {
value={height}
defaultValue={optimalDimension}
onChange={onChange}
- min={config.sliderMin}
- max={config.sliderMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.sliderMin}
+ max={CONSTRAINTS.sliderMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
marks={marks}
/>
@@ -47,9 +51,9 @@ export const DimensionsHeight = memo(() => {
value={height}
defaultValue={optimalDimension}
onChange={onChange}
- min={config.numberInputMin}
- max={config.numberInputMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.numberInputMin}
+ max={CONSTRAINTS.numberInputMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
/>
diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx
index 2de397cc784..6ab17147a74 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx
@@ -1,10 +1,6 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import {
- aspectRatioLockToggled,
- selectAspectRatioIsLocked,
- selectIsApiBaseModel,
-} from 'features/controlLayers/store/paramsSlice';
+import { aspectRatioLockToggled, selectAspectRatioIsLocked } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiLockSimpleFill, PiLockSimpleOpenBold } from 'react-icons/pi';
@@ -13,7 +9,6 @@ export const DimensionsLockAspectRatioButton = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const isLocked = useAppSelector(selectAspectRatioIsLocked);
- const isApiModel = useAppSelector(selectIsApiBaseModel);
const onClick = useCallback(() => {
dispatch(aspectRatioLockToggled());
@@ -27,7 +22,6 @@ export const DimensionsLockAspectRatioButton = memo(() => {
variant={isLocked ? 'outline' : 'ghost'}
size="sm"
icon={isLocked ? : }
- isDisabled={isApiModel}
/>
);
});
diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx
index eda44ba925d..c1c43f0cec4 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx
@@ -1,11 +1,6 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import {
- selectHeight,
- selectIsApiBaseModel,
- selectWidth,
- sizeOptimized,
-} from 'features/controlLayers/store/paramsSlice';
+import { selectHeight, selectWidth, sizeOptimized } from 'features/controlLayers/store/paramsSlice';
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
import { getIsSizeTooLarge, getIsSizeTooSmall } from 'features/parameters/util/optimalDimension';
import { memo, useCallback, useMemo } from 'react';
@@ -15,7 +10,6 @@ import { PiSparkleFill } from 'react-icons/pi';
export const DimensionsSetOptimalSizeButton = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
- const isApiModel = useAppSelector(selectIsApiBaseModel);
const width = useAppSelector(selectWidth);
const height = useAppSelector(selectHeight);
const optimalDimension = useAppSelector(selectOptimalDimension);
@@ -49,7 +43,6 @@ export const DimensionsSetOptimalSizeButton = memo(() => {
size="sm"
icon={}
colorScheme={isSizeTooSmall || isSizeTooLarge ? 'warning' : 'base'}
- isDisabled={isApiModel}
/>
);
});
diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx
index eb2f96af96a..20a754c5c30 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx
@@ -1,22 +1,27 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
-import { selectIsApiBaseModel, selectWidth, widthChanged } from 'features/controlLayers/store/paramsSlice';
+import { selectWidth, widthChanged } from 'features/controlLayers/store/paramsSlice';
import { selectGridSize, selectOptimalDimension } from 'features/controlLayers/store/selectors';
-import { selectWidthConfig } from 'features/system/store/configSlice';
-import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
+export const CONSTRAINTS = {
+ initial: 512,
+ sliderMin: 64,
+ sliderMax: 1536,
+ numberInputMin: 64,
+ numberInputMax: 4096,
+ fineStep: 8,
+ coarseStep: 64,
+};
+
export const DimensionsWidth = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const width = useAppSelector(selectWidth);
const optimalDimension = useAppSelector(selectOptimalDimension);
- const config = useAppSelector(selectWidthConfig);
- const isApiModel = useAppSelector(selectIsApiBaseModel);
const gridSize = useAppSelector(selectGridSize);
- const activeTab = useAppSelector(selectActiveTab);
const onChange = useCallback(
(v: number) => {
@@ -25,13 +30,10 @@ export const DimensionsWidth = memo(() => {
[dispatch]
);
- const marks = useMemo(
- () => [config.sliderMin, optimalDimension, config.sliderMax],
- [config.sliderMax, config.sliderMin, optimalDimension]
- );
+ const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]);
return (
-
+
{t('parameters.width')}
@@ -39,9 +41,9 @@ export const DimensionsWidth = memo(() => {
value={width}
onChange={onChange}
defaultValue={optimalDimension}
- min={config.sliderMin}
- max={config.sliderMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.sliderMin}
+ max={CONSTRAINTS.sliderMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
marks={marks}
/>
@@ -49,9 +51,9 @@ export const DimensionsWidth = memo(() => {
value={width}
onChange={onChange}
defaultValue={optimalDimension}
- min={config.numberInputMin}
- max={config.numberInputMax}
- step={config.coarseStep}
+ min={CONSTRAINTS.numberInputMin}
+ max={CONSTRAINTS.numberInputMax}
+ step={CONSTRAINTS.coarseStep}
fineStep={gridSize}
/>
diff --git a/invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx b/invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx
deleted file mode 100644
index 87871387be2..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx
+++ /dev/null
@@ -1,39 +0,0 @@
-import { Flex, Link, Text } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectModel } from 'features/controlLayers/store/paramsSlice';
-import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled';
-import { Trans, useTranslation } from 'react-i18next';
-
-export const DisabledModelWarning = () => {
- const { t } = useTranslation();
- const model = useAppSelector(selectModel);
-
- const accountSettingsLink = useStore($accountSettingsLink);
- const { isChatGPT4oHighModelDisabled } = useIsModelDisabled();
-
- if (!model || !isChatGPT4oHighModelDisabled(model)) {
- return null;
- }
-
- return (
-
-
-
- {t('parameters.invoke.accountSettings')}
-
- ),
- }}
- />
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx b/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx
index 6d97b6bdd71..5a06bf8c514 100644
--- a/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx
@@ -1,34 +1,23 @@
import type { IconButtonProps } from '@invoke-ai/ui-library';
import { IconButton } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCubeBold } from 'react-icons/pi';
export const NavigateToModelManagerButton = memo((props: Omit) => {
- const isModelsTabDisabled = useAppSelector(selectIsModelsTabDisabled);
- const onClickGoToModelManager = useStore($onClickGoToModelManager);
-
const { t } = useTranslation();
const onClick = useCallback(() => {
navigationApi.switchToTab('models');
}, []);
- if (isModelsTabDisabled && !onClickGoToModelManager) {
- return null;
- }
-
return (
}
tooltip={`${t('modelManager.manageModels')}`}
aria-label={`${t('modelManager.manageModels')}`}
- onClick={onClickGoToModelManager ?? onClick}
+ onClick={onClick}
size="sm"
variant="ghost"
{...props}
diff --git a/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx b/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx
index 14d19934510..48e30cc4af3 100644
--- a/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx
@@ -12,10 +12,8 @@ import {
Spacer,
Text,
} from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
import { EMPTY_ARRAY } from 'app/store/constants';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
-import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
import { useAppSelector } from 'app/store/storeHooks';
import type { Group, PickerContextState } from 'common/components/Picker/Picker';
import { buildGroup, getRegex, isGroup, Picker, usePickerContext } from 'common/components/Picker/Picker';
@@ -24,18 +22,11 @@ import { typedMemo } from 'common/util/typedMemo';
import { uniq } from 'es-toolkit/compat';
import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
-import {
- API_BASE_MODELS,
- MODEL_BASE_TO_COLOR,
- MODEL_BASE_TO_LONG_NAME,
- MODEL_BASE_TO_SHORT_NAME,
-} from 'features/modelManagerV2/models';
+import { MODEL_BASE_TO_COLOR, MODEL_BASE_TO_LONG_NAME, MODEL_BASE_TO_SHORT_NAME } from 'features/modelManagerV2/models';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage';
import type { BaseModelType } from 'features/nodes/types/common';
import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
-import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { filesize } from 'filesize';
import { memo, useCallback, useMemo, useRef } from 'react';
@@ -76,22 +67,12 @@ type WithStarred = T & { starred?: boolean };
const getOptionId = (modelConfig: WithStarred) => modelConfig.key;
const ModelManagerLink = memo((props: ButtonProps) => {
- const onClickGoToModelManager = useStore($onClickGoToModelManager);
const onClick = useCallback(() => {
navigationApi.switchToTab('models');
setInstallModelsTabByName('launchpad');
}, []);
- return (
-
- );
+ return ;
});
ModelManagerLink.displayName = 'ModelManagerLink';
@@ -101,47 +82,31 @@ const components = {
const NoOptionsFallback = memo(({ noOptionsText }: { noOptionsText?: string }) => {
const { t } = useTranslation();
- const isModelsTabDisabled = useAppSelector(selectIsModelsTabDisabled);
- const onClickGoToModelManager = useStore($onClickGoToModelManager);
return (
{noOptionsText ?? t('modelManager.modelPickerFallbackNoModelsInstalled')}
- {(!isModelsTabDisabled || onClickGoToModelManager) && (
-
-
-
- )}
+
+
+
);
});
NoOptionsFallback.displayName = 'NoOptionsFallback';
const getGroupIDFromModelConfig = (modelConfig: AnyModelConfig): string => {
- if (API_BASE_MODELS.includes(modelConfig.base)) {
- return 'api';
- }
return modelConfig.base;
};
const getGroupNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
- if (API_BASE_MODELS.includes(modelConfig.base)) {
- return 'External API';
- }
return MODEL_BASE_TO_LONG_NAME[modelConfig.base];
};
const getGroupShortNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
- if (API_BASE_MODELS.includes(modelConfig.base)) {
- return 'api';
- }
return MODEL_BASE_TO_SHORT_NAME[modelConfig.base];
};
const getGroupColorSchemeFromModelConfig = (modelConfig: AnyModelConfig): string => {
- if (API_BASE_MODELS.includes(modelConfig.base)) {
- return 'pink';
- }
return MODEL_BASE_TO_COLOR[modelConfig.base];
};
@@ -199,11 +164,9 @@ export const ModelPicker = typedMemo(
}) => {
const { t } = useTranslation();
const selectedKeys = useAppSelector(selectSelectedModelKeys);
- const isModelRelationshipsEnabled = useFeatureStatus('modelRelationships');
const { relatedModelKeys } = useGetRelatedModelIdsBatchQuery(selectedKeys, {
...relatedModelKeysQueryOptions,
- skip: !isModelRelationshipsEnabled,
});
const options = useMemo[] | Group>[]>(() => {
@@ -447,18 +410,6 @@ const PickerOptionComponent = typedMemo(
{filesize(option.file_size)}
)}
- {option.usage_info && (
-
- {option.usage_info}
-
- )}
{option.description && !isCompactView && (
diff --git a/invokeai/frontend/web/src/features/parameters/components/PixelDimensionsUnsupportedAlert.tsx b/invokeai/frontend/web/src/features/parameters/components/PixelDimensionsUnsupportedAlert.tsx
deleted file mode 100644
index febb0e94c41..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/PixelDimensionsUnsupportedAlert.tsx
+++ /dev/null
@@ -1,14 +0,0 @@
-import { Alert, Text } from '@invoke-ai/ui-library';
-import { memo } from 'react';
-
-export const PixelDimensionsUnsupportedAlert = memo(() => {
- return (
-
-
- Select an aspect ratio to control the size of the resulting image from this model.
-
-
- );
-});
-
-PixelDimensionsUnsupportedAlert.displayName = 'PixelDimensionsUnsupportedAlert';
diff --git a/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx b/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx
index 3393c0e14e7..9de73262700 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx
@@ -8,20 +8,18 @@ import {
} from 'features/controlLayers/store/paramsSlice';
import { ParamNegativePrompt } from 'features/parameters/components/Core/ParamNegativePrompt';
import { ParamPositivePrompt } from 'features/parameters/components/Core/ParamPositivePrompt';
-import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
export const Prompts = memo(() => {
const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt);
const modelSupportsRefImages = useAppSelector(selectModelSupportsRefImages);
const hasNegativePrompt = useAppSelector(selectHasNegativePrompt);
- const activeTab = useAppSelector(selectActiveTab);
return (
- {activeTab !== 'video' && modelSupportsNegativePrompt && hasNegativePrompt && }
- {activeTab !== 'video' && modelSupportsRefImages && }
+ {modelSupportsNegativePrompt && hasNegativePrompt && }
+ {modelSupportsRefImages && }
);
});
diff --git a/invokeai/frontend/web/src/features/parameters/components/Upscale/ParamUpscaleCFGScale.tsx b/invokeai/frontend/web/src/features/parameters/components/Upscale/ParamUpscaleCFGScale.tsx
index 9af368cc018..df48c52724d 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Upscale/ParamUpscaleCFGScale.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Upscale/ParamUpscaleCFGScale.tsx
@@ -2,19 +2,25 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectUpscaleCfgScale, setUpscaleCfgScale } from 'features/controlLayers/store/paramsSlice';
-import { selectCFGScaleConfig } from 'features/system/store/configSlice';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
+const CONSTRAINTS = {
+ initial: 7,
+ sliderMin: 1,
+ sliderMax: 20,
+ numberInputMin: 1,
+ numberInputMax: 200,
+ fineStep: 0.1,
+ coarseStep: 0.5,
+};
+
+const MARKS = [CONSTRAINTS.sliderMin, Math.floor(CONSTRAINTS.sliderMax / 2), CONSTRAINTS.sliderMax];
+
const ParamUpscaleCFGScale = () => {
const cfgScale = useAppSelector(selectUpscaleCfgScale);
- const config = useAppSelector(selectCFGScaleConfig);
const dispatch = useAppDispatch();
const { t } = useTranslation();
- const marks = useMemo(
- () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax],
- [config.sliderMax, config.sliderMin]
- );
const onChange = useCallback((v: number) => dispatch(setUpscaleCfgScale(v)), [dispatch]);
return (
@@ -24,21 +30,21 @@ const ParamUpscaleCFGScale = () => {
diff --git a/invokeai/frontend/web/src/features/parameters/components/Video/ParamDuration.tsx b/invokeai/frontend/web/src/features/parameters/components/Video/ParamDuration.tsx
deleted file mode 100644
index aa7bb212a7b..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/Video/ParamDuration.tsx
+++ /dev/null
@@ -1,56 +0,0 @@
-import type { ComboboxOnChange } from '@invoke-ai/ui-library';
-import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import {
- isRunwayDurationID,
- isVeo3DurationID,
- RUNWAY_DURATIONS,
- VEO3_DURATIONS,
-} from 'features/controlLayers/store/types';
-import { selectVideoDuration, selectVideoModel, videoDurationChanged } from 'features/parameters/store/videoSlice';
-import { useCallback, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-
-export const ParamDuration = () => {
- const videoDuration = useAppSelector(selectVideoDuration);
- const { t } = useTranslation();
- const dispatch = useAppDispatch();
- const model = useAppSelector(selectVideoModel);
-
- const options = useMemo(() => {
- if (model?.base === 'veo3') {
- return Object.entries(VEO3_DURATIONS).map(([key, value]) => ({
- label: value,
- value: key,
- }));
- } else if (model?.base === 'runway') {
- return Object.entries(RUNWAY_DURATIONS).map(([key, value]) => ({
- label: value,
- value: key,
- }));
- } else {
- return [];
- }
- }, [model]);
-
- const onChange = useCallback(
- (v) => {
- const duration = v?.value;
- if (!isVeo3DurationID(duration) && !isRunwayDurationID(duration)) {
- return;
- }
-
- dispatch(videoDurationChanged(duration));
- },
- [dispatch]
- );
-
- const value = useMemo(() => options.find((o) => o.value === videoDuration), [videoDuration, options]);
-
- return (
-
- {t('parameters.duration')}
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/parameters/components/Video/ParamResolution.tsx b/invokeai/frontend/web/src/features/parameters/components/Video/ParamResolution.tsx
deleted file mode 100644
index d4000fda643..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/Video/ParamResolution.tsx
+++ /dev/null
@@ -1,50 +0,0 @@
-import type { ComboboxOnChange } from '@invoke-ai/ui-library';
-import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import {
- isRunwayResolution,
- isVeo3Resolution,
- zRunwayResolution,
- zVeo3Resolution,
-} from 'features/controlLayers/store/types';
-import { selectVideoModel, selectVideoResolution, videoResolutionChanged } from 'features/parameters/store/videoSlice';
-import { useCallback, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-
-export const ParamResolution = () => {
- const videoResolution = useAppSelector(selectVideoResolution);
- const { t } = useTranslation();
- const dispatch = useAppDispatch();
- const model = useAppSelector(selectVideoModel);
-
- const options = useMemo(() => {
- if (model?.base === 'veo3') {
- return zVeo3Resolution.options.map((o) => ({ label: o, value: o }));
- } else if (model?.base === 'runway') {
- return zRunwayResolution.options.map((o) => ({ label: o, value: o }));
- } else {
- return [];
- }
- }, [model]);
-
- const onChange = useCallback(
- (v) => {
- const resolution = v?.value;
- if (!isVeo3Resolution(resolution) && !isRunwayResolution(resolution)) {
- return;
- }
-
- dispatch(videoResolutionChanged(resolution));
- },
- [dispatch]
- );
-
- const value = useMemo(() => options.find((o) => o.value === videoResolution), [videoResolution, options]);
-
- return (
-
- {t('parameters.resolution')}
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensions.tsx b/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensions.tsx
deleted file mode 100644
index fec3ea407c3..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensions.tsx
+++ /dev/null
@@ -1,22 +0,0 @@
-import { Flex } from '@invoke-ai/ui-library';
-import { memo } from 'react';
-
-import { ParamResolution } from './ParamResolution';
-import { VideoDimensionsAspectRatioSelect } from './VideoDimensionsAspectRatioSelect';
-import { VideoDimensionsPreview } from './VideoDimensionsPreview';
-
-export const VideoDimensions = memo(() => {
- return (
-
-
-
-
-
-
-
-
-
- );
-});
-
-VideoDimensions.displayName = 'VideoDimensions';
diff --git a/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsAspectRatioSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsAspectRatioSelect.tsx
deleted file mode 100644
index 072b6c8a481..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsAspectRatioSelect.tsx
+++ /dev/null
@@ -1,54 +0,0 @@
-import type { ComboboxOnChange } from '@invoke-ai/ui-library';
-import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
-import { isVideoAspectRatio, zRunwayAspectRatioID, zVeo3AspectRatioID } from 'features/controlLayers/store/types';
-import {
- selectIsRunway,
- selectIsVeo3,
- selectVideoAspectRatio,
- videoAspectRatioChanged,
-} from 'features/parameters/store/videoSlice';
-import { memo, useCallback, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-
-export const VideoDimensionsAspectRatioSelect = memo(() => {
- const { t } = useTranslation();
- const dispatch = useAppDispatch();
- const id = useAppSelector(selectVideoAspectRatio);
- const isVeo3 = useAppSelector(selectIsVeo3);
- const isRunway = useAppSelector(selectIsRunway);
- const options = useMemo(() => {
- if (isVeo3) {
- return zVeo3AspectRatioID.options.map((o) => ({ label: o, value: o }));
- }
- if (isRunway) {
- return zRunwayAspectRatioID.options.map((o) => ({ label: o, value: o }));
- }
- // All other models
- return [];
- }, [isVeo3, isRunway]);
-
- const onChange = useCallback(
- (v) => {
- if (!isVideoAspectRatio(v?.value)) {
- return;
- }
- dispatch(videoAspectRatioChanged(v.value));
- },
- [dispatch]
- );
-
- const value = useMemo(() => options.find((o) => o.value === id), [id, options]);
-
- return (
-
-
- {t('parameters.aspect')}
-
-
-
- );
-});
-
-VideoDimensionsAspectRatioSelect.displayName = 'VideoDimensionsAspectRatioSelect';
diff --git a/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsPreview.tsx b/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsPreview.tsx
deleted file mode 100644
index a64b7e21102..00000000000
--- a/invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsPreview.tsx
+++ /dev/null
@@ -1,88 +0,0 @@
-import { Flex, Grid, GridItem, Text } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import { ASPECT_RATIO_MAP } from 'features/controlLayers/store/types';
-import { useCurrentVideoDimensions } from 'features/parameters/hooks/useCurrentVideoDimensions';
-import { selectVideoAspectRatio } from 'features/parameters/store/videoSlice';
-import { memo, useMemo } from 'react';
-import { useMeasure } from 'react-use';
-
-export const VideoDimensionsPreview = memo(() => {
- const aspectRatio = useAppSelector(selectVideoAspectRatio);
- const [ref, dims] = useMeasure();
-
- const currentVideoDimensions = useCurrentVideoDimensions();
-
- const previewBoxSize = useMemo(() => {
- if (!dims) {
- return { width: 0, height: 0 };
- }
-
- const aspectRatioValue = ASPECT_RATIO_MAP[aspectRatio]?.ratio ?? 1;
-
- let width = currentVideoDimensions.width;
- let height = currentVideoDimensions.height;
-
- if (currentVideoDimensions.width > currentVideoDimensions.height) {
- width = dims.width;
- height = width / aspectRatioValue;
- } else {
- height = dims.height;
- width = height * aspectRatioValue;
- }
-
- return { width, height };
- }, [dims, currentVideoDimensions, aspectRatio]);
-
- return (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- {currentVideoDimensions.width}x{currentVideoDimensions.height}
-
-
-
-
- );
-});
-
-VideoDimensionsPreview.displayName = 'VideoDimensionsPreview';
diff --git a/invokeai/frontend/web/src/features/parameters/hooks/useCurrentVideoDimensions.ts b/invokeai/frontend/web/src/features/parameters/hooks/useCurrentVideoDimensions.ts
deleted file mode 100644
index 25f6299c619..00000000000
--- a/invokeai/frontend/web/src/features/parameters/hooks/useCurrentVideoDimensions.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-import { useAppSelector } from 'app/store/storeHooks';
-import type { AspectRatioID } from 'features/controlLayers/store/types';
-import { ASPECT_RATIO_MAP, RESOLUTION_MAP } from 'features/controlLayers/store/types';
-import { selectVideoAspectRatio, selectVideoResolution } from 'features/parameters/store/videoSlice';
-import { useMemo } from 'react';
-
-export const useCurrentVideoDimensions = () => {
- const videoAspectRatio = useAppSelector(selectVideoAspectRatio);
- const videoResolution = useAppSelector(selectVideoResolution);
-
- const currentVideoDimensions = useMemo(() => {
- // Default fallback dimensions
- const fallback = { width: 1280, height: 720 };
-
- if (!videoAspectRatio || !videoResolution) {
- return fallback;
- }
-
- // Get base resolution dimensions from the resolution tables
- let baseWidth: number;
- let baseHeight: number;
-
- const resolutionDims = RESOLUTION_MAP[videoResolution];
- baseWidth = resolutionDims.width;
- baseHeight = resolutionDims.height;
-
- // Get the aspect ratio value from the map
- const aspectRatioData = ASPECT_RATIO_MAP[videoAspectRatio as Exclude];
- if (!aspectRatioData) {
- return { width: baseWidth, height: baseHeight };
- }
-
- const targetRatio = aspectRatioData.ratio;
-
- // Calculate dimensions that maintain the aspect ratio while respecting the resolution
- // We use the resolution as a constraint on the total pixel count
- const totalPixels = baseWidth * baseHeight;
-
- // Calculate dimensions that match the aspect ratio and approximate the target pixel count
- // width * height = totalPixels
- // width / height = targetRatio
- // Therefore: width = sqrt(totalPixels * targetRatio) and height = sqrt(totalPixels / targetRatio)
- const calculatedWidth = Math.round(Math.sqrt(totalPixels * targetRatio));
- const calculatedHeight = Math.round(Math.sqrt(totalPixels / targetRatio));
-
- // Ensure dimensions are even numbers (common requirement for video encoding)
- const width = calculatedWidth % 2 === 0 ? calculatedWidth : calculatedWidth + 1;
- const height = calculatedHeight % 2 === 0 ? calculatedHeight : calculatedHeight + 1;
-
- return { width, height };
- }, [videoAspectRatio, videoResolution]);
-
- return currentVideoDimensions;
-};
diff --git a/invokeai/frontend/web/src/features/parameters/hooks/useIsModelDisabled.ts b/invokeai/frontend/web/src/features/parameters/hooks/useIsModelDisabled.ts
deleted file mode 100644
index dfd4e823a51..00000000000
--- a/invokeai/frontend/web/src/features/parameters/hooks/useIsModelDisabled.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
-import { useCallback } from 'react';
-
-export const useIsModelDisabled = () => {
- const isChatGPT4oHighEnabled = useFeatureStatus('chatGPT4oHigh');
-
- const isChatGPT4oHighModelDisabled = useCallback(
- (model: ParameterModel) => {
- return model?.base === 'chatgpt-4o' && model.name.toLowerCase().includes('high') && !isChatGPT4oHighEnabled;
- },
- [isChatGPT4oHighEnabled]
- );
-
- return { isChatGPT4oHighModelDisabled };
-};
diff --git a/invokeai/frontend/web/src/features/parameters/hooks/useIsTooLargeToUpscale.ts b/invokeai/frontend/web/src/features/parameters/hooks/useIsTooLargeToUpscale.ts
deleted file mode 100644
index 7428d50c9b3..00000000000
--- a/invokeai/frontend/web/src/features/parameters/hooks/useIsTooLargeToUpscale.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store/storeHooks';
-import type { ImageWithDims } from 'features/controlLayers/store/types';
-import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
-import { selectConfigSlice } from 'features/system/store/configSlice';
-import { useMemo } from 'react';
-
-const createIsTooLargeToUpscaleSelector = (imageWithDims?: ImageWithDims | null) =>
- createSelector(selectUpscaleSlice, selectConfigSlice, (upscale, config) => {
- const { upscaleModel, scale } = upscale;
- const { maxUpscaleDimension } = config;
-
- if (!maxUpscaleDimension || !upscaleModel || !imageWithDims) {
- // When these are missing, another warning will be shown
- return false;
- }
-
- const { width, height } = imageWithDims;
-
- const maxPixels = maxUpscaleDimension ** 2;
- const upscaledPixels = width * scale * height * scale;
-
- return upscaledPixels > maxPixels;
- });
-
-export const useIsTooLargeToUpscale = (imageWithDims?: ImageWithDims | null) => {
- const selectIsTooLargeToUpscale = useMemo(() => createIsTooLargeToUpscaleSelector(imageWithDims), [imageWithDims]);
- return useAppSelector(selectIsTooLargeToUpscale);
-};
diff --git a/invokeai/frontend/web/src/features/parameters/store/videoSlice.ts b/invokeai/frontend/web/src/features/parameters/store/videoSlice.ts
deleted file mode 100644
index f210cb91aaa..00000000000
--- a/invokeai/frontend/web/src/features/parameters/store/videoSlice.ts
+++ /dev/null
@@ -1,168 +0,0 @@
-import type { PayloadAction, Selector } from '@reduxjs/toolkit';
-import { createSelector, createSlice } from '@reduxjs/toolkit';
-import type { RootState } from 'app/store/store';
-import type { SliceConfig } from 'app/store/types';
-import { isPlainObject } from 'es-toolkit';
-import type {
- CroppableImageWithDims,
- VideoAspectRatio,
- VideoDuration,
- VideoResolution,
-} from 'features/controlLayers/store/types';
-import {
- isRunwayAspectRatioID,
- isRunwayDurationID,
- isRunwayResolution,
- isVeo3AspectRatioID,
- isVeo3DurationID,
- isVeo3Resolution,
- zCroppableImageWithDims,
- zVideoAspectRatio,
- zVideoDuration,
- zVideoResolution,
-} from 'features/controlLayers/store/types';
-import { REQUIRES_STARTING_FRAME_BASE_MODELS } from 'features/modelManagerV2/models';
-import type { ModelIdentifierField } from 'features/nodes/types/common';
-import { zModelIdentifierField } from 'features/nodes/types/common';
-import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
-import { isVideoModelConfig } from 'services/api/types';
-import { assert } from 'tsafe';
-import z from 'zod';
-
-const zVideoState = z.object({
- _version: z.literal(2),
- startingFrameImage: zCroppableImageWithDims.nullable(),
- videoModel: zModelIdentifierField.nullable(),
- videoResolution: zVideoResolution,
- videoDuration: zVideoDuration,
- videoAspectRatio: zVideoAspectRatio,
-});
-
-export type VideoState = z.infer;
-
-const getInitialState = (): VideoState => {
- return {
- _version: 2,
- startingFrameImage: null,
- videoModel: null,
- videoResolution: '1080p',
- videoDuration: '8',
- videoAspectRatio: '16:9',
- };
-};
-
-const slice = createSlice({
- name: 'video',
- initialState: getInitialState(),
- reducers: {
- startingFrameImageChanged: (state, action: PayloadAction) => {
- state.startingFrameImage = action.payload;
- },
-
- videoModelChanged: (state, action: PayloadAction<{ videoModel: ModelIdentifierField | null }>) => {
- const { videoModel } = action.payload;
-
- state.videoModel = videoModel;
-
- if (videoModel?.base === 'veo3') {
- if (!state.videoResolution || !isVeo3Resolution(state.videoResolution)) {
- state.videoResolution = '1080p';
- }
- if (!state.videoDuration || !isVeo3DurationID(state.videoDuration)) {
- state.videoDuration = '8';
- }
- if (!state.videoAspectRatio || !isVeo3AspectRatioID(state.videoAspectRatio)) {
- state.videoAspectRatio = '16:9';
- }
- } else if (videoModel?.base === 'runway') {
- if (!state.videoResolution || !isRunwayResolution(state.videoResolution)) {
- state.videoResolution = '720p';
- }
- if (!state.videoDuration || !isRunwayDurationID(state.videoDuration)) {
- state.videoDuration = '5';
- }
- if (!state.videoAspectRatio || !isRunwayAspectRatioID(state.videoAspectRatio)) {
- state.videoAspectRatio = '16:9';
- }
- }
- },
-
- videoResolutionChanged: (state, action: PayloadAction) => {
- state.videoResolution = action.payload;
- },
-
- videoDurationChanged: (state, action: PayloadAction) => {
- state.videoDuration = action.payload;
- },
-
- videoAspectRatioChanged: (state, action: PayloadAction) => {
- state.videoAspectRatio = action.payload;
- },
- },
-});
-
-export const {
- startingFrameImageChanged,
- videoModelChanged,
- videoResolutionChanged,
- videoDurationChanged,
- videoAspectRatioChanged,
-} = slice.actions;
-
-export const videoSliceConfig: SliceConfig = {
- slice,
- schema: zVideoState,
- getInitialState,
- persistConfig: {
- migrate: (state) => {
- assert(isPlainObject(state));
- if (!('_version' in state)) {
- state._version = 1;
- }
- if (state._version === 1) {
- state._version = 2;
- if (state.startingFrameImage) {
- // startingFrameImage changed from ImageWithDims to CroppableImageWithDims
- state.startingFrameImage = zCroppableImageWithDims.parse({ original: state.startingFrameImage });
- }
- }
- return zVideoState.parse(state);
- },
- },
-};
-
-export const selectVideoSlice = (state: RootState) => state.video;
-const createVideoSelector = (selector: Selector) => createSelector(selectVideoSlice, selector);
-
-export const selectStartingFrameImage = createVideoSelector((video) => video.startingFrameImage);
-export const selectVideoModel = createVideoSelector((video) => video.videoModel);
-export const selectVideoModelKey = createVideoSelector((video) => video.videoModel?.key);
-export const selectVideoResolution = createVideoSelector((video) => video.videoResolution);
-export const selectVideoDuration = createVideoSelector((video) => video.videoDuration);
-export const selectVideoAspectRatio = createVideoSelector((video) => video.videoAspectRatio);
-export const selectIsVeo3 = createVideoSelector((video) => video.videoModel?.base === 'veo3');
-export const selectIsRunway = createVideoSelector((video) => video.videoModel?.base === 'runway');
-export const selectVideoModelConfig = createSelector(
- selectModelConfigsQuery,
- selectVideoSlice,
- (modelConfigs, { videoModel }) => {
- if (!modelConfigs.data) {
- return null;
- }
- if (!videoModel) {
- return null;
- }
- const modelConfig = modelConfigsAdapterSelectors.selectById(modelConfigs.data, videoModel.key);
- if (!modelConfig) {
- return null;
- }
- if (!isVideoModelConfig(modelConfig)) {
- return null;
- }
- return modelConfig;
- }
-);
-export const selectVideoModelRequiresStartingFrame = createSelector(
- selectVideoModel,
- (model) => !!model && REQUIRES_STARTING_FRAME_BASE_MODELS.includes(model.base)
-);
diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts
index 3f879db257c..b14e566d3d1 100644
--- a/invokeai/frontend/web/src/features/parameters/types/constants.ts
+++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts
@@ -37,26 +37,6 @@ export const CLIP_SKIP_MAP: { [key in BaseModelType]?: { maxClip: number; marker
maxClip: 0,
markers: [],
},
- imagen3: {
- maxClip: 0,
- markers: [],
- },
- imagen4: {
- maxClip: 0,
- markers: [],
- },
- 'chatgpt-4o': {
- maxClip: 0,
- markers: [],
- },
- 'flux-kontext': {
- maxClip: 0,
- markers: [],
- },
- 'gemini-2.5': {
- maxClip: 0,
- markers: [],
- },
};
/**
diff --git a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts
index d1bccc691ab..57d4e8acb50 100644
--- a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts
+++ b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts
@@ -133,6 +133,15 @@ export type ParameterSpandrelImageToImageModel = z.infer;
+export const PARAMETER_STRENGTH_CONSTRAINTS = {
+ initial: 0.7,
+ sliderMin: 0,
+ sliderMax: 1,
+ numberInputMin: 0,
+ numberInputMax: 1,
+ fineStep: 0.01,
+ coarseStep: 0.05,
+};
// #endregion
// #region SeamlessX
diff --git a/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts b/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts
index 4318c65144b..7fe7311db93 100644
--- a/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts
+++ b/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts
@@ -17,11 +17,6 @@ export const getOptimalDimension = (base?: BaseModelType | null): number => {
case 'flux':
case 'sd-3':
case 'cogview4':
- case 'imagen3':
- case 'imagen4':
- case 'chatgpt-4o':
- case 'flux-kontext':
- case 'gemini-2.5':
default:
return 1024;
}
@@ -80,9 +75,6 @@ export const getGridSize = (base?: BaseModelType | null): number => {
case 'sd-1':
case 'sd-2':
case 'sdxl':
- case 'imagen3':
- case 'chatgpt-4o':
- case 'flux-kontext':
default:
return 8;
}
diff --git a/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionMenu.tsx b/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionMenu.tsx
deleted file mode 100644
index d337926fdb0..00000000000
--- a/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionMenu.tsx
+++ /dev/null
@@ -1,80 +0,0 @@
-import { IconButton, Menu, MenuButton, MenuItem, MenuList, Text } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { useAppStore } from 'app/store/storeHooks';
-import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
-import { WrappedError } from 'common/util/result';
-import { toast } from 'features/toast/toast';
-import { useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiMagicWandBold } from 'react-icons/pi';
-import type { ImageDTO } from 'services/api/types';
-
-import { expandPrompt } from './expand';
-import { promptExpansionApi } from './state';
-
-export const PromptExpansionMenu = () => {
- const { dispatch, getState } = useAppStore();
- const { t } = useTranslation();
- const { isPending } = useStore(promptExpansionApi.$state);
-
- const onUploadStarted = useCallback(() => {
- promptExpansionApi.setPending();
- }, []);
-
- const onUpload = useCallback(
- (imageDTO: ImageDTO) => {
- promptExpansionApi.setPending(imageDTO);
- expandPrompt({ dispatch, getState, imageDTO });
- },
- [dispatch, getState]
- );
-
- const onUploadError = useCallback(
- (error: unknown) => {
- const wrappedError = WrappedError.wrap(error);
- promptExpansionApi.setError(wrappedError);
- toast({
- id: 'UPLOAD_AND_PROMPT_GENERATION_FAILED',
- title: t('toast.uploadAndPromptGenerationFailed'),
- status: 'error',
- });
- },
- [t]
- );
-
- const uploadApi = useImageUploadButton({
- allowMultiple: false,
- onUpload,
- onUploadStarted,
- onError: onUploadError,
- });
-
- const onClickExpandPrompt = useCallback(() => {
- promptExpansionApi.setPending();
- expandPrompt({ dispatch, getState });
- }, [dispatch, getState]);
-
- return (
- <>
-
-
- >
- );
-};
diff --git a/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionOverlay.tsx b/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionOverlay.tsx
deleted file mode 100644
index ccc2ff7853a..00000000000
--- a/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionOverlay.tsx
+++ /dev/null
@@ -1,68 +0,0 @@
-import { Box, Flex, Image, Spinner, Text } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { PromptExpansionResultOverlay } from 'features/prompt/PromptExpansion/PromptExpansionResultOverlay';
-import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiMagicWandBold } from 'react-icons/pi';
-
-import { promptExpansionApi } from './state';
-
-export const PromptExpansionOverlay = memo(() => {
- const { isSuccess, isPending, result, imageDTO } = useStore(promptExpansionApi.$state);
- const { t } = useTranslation();
-
- // Show result overlay when completed
- if (isSuccess) {
- return ;
- }
-
- // Show pending overlay when pending
- if (!isPending) {
- return null;
- }
-
- return (
-
- {/* Show dimmed source image if available */}
- {imageDTO && (
-
-
-
- )}
-
-
-
-
-
-
-
- {t('prompt.expandingPrompt')}
-
-
-
- );
-});
-
-PromptExpansionOverlay.displayName = 'PromptExpansionOverlay';
diff --git a/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionResultOverlay.tsx b/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionResultOverlay.tsx
deleted file mode 100644
index 015bf5946d1..00000000000
--- a/invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionResultOverlay.tsx
+++ /dev/null
@@ -1,76 +0,0 @@
-import { ButtonGroup, Flex, Icon, IconButton, Text, Tooltip } from '@invoke-ai/ui-library';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { positivePromptChanged, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice';
-import { useCallback } from 'react';
-import { PiCheckBold, PiMagicWandBold, PiPlusBold, PiXBold } from 'react-icons/pi';
-
-import { promptExpansionApi } from './state';
-
-interface PromptExpansionResultOverlayProps {
- expandedText: string;
-}
-
-export const PromptExpansionResultOverlay = ({ expandedText }: PromptExpansionResultOverlayProps) => {
- const dispatch = useAppDispatch();
- const positivePrompt = useAppSelector(selectPositivePrompt);
-
- const handleReplace = useCallback(() => {
- dispatch(positivePromptChanged(expandedText));
- promptExpansionApi.reset();
- }, [dispatch, expandedText]);
-
- const handleInsert = useCallback(() => {
- const currentText = positivePrompt;
- const newText = currentText ? `${currentText}\n${expandedText}` : expandedText;
- dispatch(positivePromptChanged(newText));
- promptExpansionApi.reset();
- }, [dispatch, expandedText, positivePrompt]);
-
- const handleDiscard = useCallback(() => {
- promptExpansionApi.reset();
- }, []);
-
- return (
-
-
-
-
- {expandedText}
-
-
-
-
-
-
- }
- colorScheme="invokeGreen"
- size="xs"
- aria-label="Replace"
- />
-
-
-
- }
- colorScheme="invokeBlue"
- size="xs"
- aria-label="Insert"
- />
-
-
-
- }
- colorScheme="invokeRed"
- size="xs"
- aria-label="Discard"
- />
-
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/prompt/PromptExpansion/expand.ts b/invokeai/frontend/web/src/features/prompt/PromptExpansion/expand.ts
deleted file mode 100644
index dbd8187b461..00000000000
--- a/invokeai/frontend/web/src/features/prompt/PromptExpansion/expand.ts
+++ /dev/null
@@ -1,42 +0,0 @@
-import type { AppDispatch, AppGetState } from 'app/store/store';
-import { toast } from 'features/toast/toast';
-import { t } from 'i18next';
-import { buildRunGraphDependencies, runGraph } from 'services/api/run-graph';
-import type { ImageDTO } from 'services/api/types';
-import { $socket } from 'services/events/stores';
-import { assert } from 'tsafe';
-
-import { buildPromptExpansionGraph } from './graph';
-import { promptExpansionApi } from './state';
-
-export const expandPrompt = async (arg: { dispatch: AppDispatch; getState: AppGetState; imageDTO?: ImageDTO }) => {
- const { dispatch, getState, imageDTO } = arg;
- const socket = $socket.get();
- if (!socket) {
- return;
- }
- const { graph, outputNodeId } = buildPromptExpansionGraph({
- state: getState(),
- imageDTO,
- });
- const dependencies = buildRunGraphDependencies(dispatch, socket);
- try {
- const { output } = await runGraph({
- graph,
- outputNodeId,
- dependencies,
- options: {
- prepend: true,
- },
- });
- assert(output.type === 'string_output');
- promptExpansionApi.setSuccess(output.value);
- } catch {
- promptExpansionApi.reset();
- toast({
- id: 'PROMPT_EXPANSION_FAILED',
- title: t('toast.promptExpansionFailed'),
- status: 'error',
- });
- }
-};
diff --git a/invokeai/frontend/web/src/features/prompt/PromptExpansion/graph.ts b/invokeai/frontend/web/src/features/prompt/PromptExpansion/graph.ts
deleted file mode 100644
index dcd4b8ab6d3..00000000000
--- a/invokeai/frontend/web/src/features/prompt/PromptExpansion/graph.ts
+++ /dev/null
@@ -1,43 +0,0 @@
-import type { RootState } from 'app/store/store';
-import { getPrefixedId } from 'features/controlLayers/konva/util';
-import { selectBase, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice';
-import { imageDTOToImageField } from 'features/controlLayers/store/util';
-import { Graph } from 'features/nodes/util/graph/generation/Graph';
-import type { ImageDTO } from 'services/api/types';
-import { assert } from 'tsafe';
-
-export const buildPromptExpansionGraph = ({
- state,
- imageDTO,
-}: {
- state: RootState;
- imageDTO?: ImageDTO;
-}): { graph: Graph; outputNodeId: string } => {
- const base = selectBase(state);
- assert(base, 'No main model found in state');
-
- const architecture = ['sdxl', 'sdxl-refiner'].includes(base) ? 'tag_based' : 'sentence_based';
-
- if (imageDTO) {
- const graph = new Graph(getPrefixedId('claude-analyze-image-graph'));
- const outputNode = graph.addNode({
- // @ts-expect-error: These nodes are not available in the OSS application
- type: 'claude_analyze_image',
- id: getPrefixedId('claude_analyze_image'),
- model_architecture: architecture,
- image: imageDTOToImageField(imageDTO),
- });
- return { graph, outputNodeId: outputNode.id };
- } else {
- const positivePrompt = selectPositivePrompt(state);
- const graph = new Graph(getPrefixedId('claude-expand-prompt-graph'));
- const outputNode = graph.addNode({
- // @ts-expect-error: These nodes are not available in the OSS application
- type: 'claude_expand_prompt',
- id: getPrefixedId('claude_expand_prompt'),
- model_architecture: architecture,
- prompt: positivePrompt,
- });
- return { graph, outputNodeId: outputNode.id };
- }
-};
diff --git a/invokeai/frontend/web/src/features/prompt/PromptExpansion/state.ts b/invokeai/frontend/web/src/features/prompt/PromptExpansion/state.ts
deleted file mode 100644
index 14cafe664a6..00000000000
--- a/invokeai/frontend/web/src/features/prompt/PromptExpansion/state.ts
+++ /dev/null
@@ -1,98 +0,0 @@
-import { deepClone } from 'common/util/deepClone';
-import { atom } from 'nanostores';
-import type { ImageDTO } from 'services/api/types';
-
-type SuccessState = {
- isSuccess: true;
- isError: false;
- isPending: false;
- result: string;
- error: null;
- imageDTO?: ImageDTO;
-};
-
-type ErrorState = {
- isSuccess: false;
- isError: true;
- isPending: false;
- result: null;
- error: Error;
- imageDTO?: ImageDTO;
-};
-
-type PendingState = {
- isSuccess: false;
- isError: false;
- isPending: true;
- result: null;
- error: null;
- imageDTO?: ImageDTO;
-};
-
-type IdleState = {
- isSuccess: false;
- isError: false;
- isPending: false;
- result: null;
- error: null;
- imageDTO?: ImageDTO;
-};
-
-export type PromptExpansionRequestState = IdleState | PendingState | SuccessState | ErrorState;
-
-const IDLE_STATE: IdleState = {
- isSuccess: false,
- isError: false,
- isPending: false,
- result: null,
- error: null,
- imageDTO: undefined,
-};
-
-const $state = atom(deepClone(IDLE_STATE));
-
-const reset = () => {
- $state.set(deepClone(IDLE_STATE));
-};
-
-const setPending = (imageDTO?: ImageDTO) => {
- $state.set({
- ...$state.get(),
- isSuccess: false,
- isError: false,
- isPending: true,
- result: null,
- error: null,
- imageDTO,
- });
-};
-
-const setSuccess = (result: string) => {
- $state.set({
- ...$state.get(),
- isSuccess: true,
- isError: false,
- isPending: false,
- result,
- error: null,
- });
-};
-
-const setError = (error: Error) => {
- $state.set({
- ...$state.get(),
- isSuccess: false,
- isError: true,
- isPending: false,
- result: null,
- error,
- });
-};
-
-export const promptExpansionApi = {
- $state,
- reset,
- setPending,
- setSuccess,
- setError,
-};
diff --git a/invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx b/invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx
deleted file mode 100644
index 243aee60f16..00000000000
--- a/invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx
+++ /dev/null
@@ -1,27 +0,0 @@
-import type { ButtonProps } from '@invoke-ai/ui-library';
-import { Button } from '@invoke-ai/ui-library';
-import { useCancelAllExceptCurrentQueueItemDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
-import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiXCircle } from 'react-icons/pi';
-
-export const CancelAllExceptCurrentButton = memo((props: ButtonProps) => {
- const { t } = useTranslation();
- const api = useCancelAllExceptCurrentQueueItemDialog();
-
- return (
- }
- colorScheme="error"
- onClick={api.openDialog}
- {...props}
- >
- {t('queue.cancelAllExceptCurrentTooltip')}
-
- );
-});
-
-CancelAllExceptCurrentButton.displayName = 'CancelAllExceptCurrentButton';
diff --git a/invokeai/frontend/web/src/features/queue/components/ClearModelCacheButton.tsx b/invokeai/frontend/web/src/features/queue/components/ClearModelCacheButton.tsx
index 26458fae6c1..e6169b210bb 100644
--- a/invokeai/frontend/web/src/features/queue/components/ClearModelCacheButton.tsx
+++ b/invokeai/frontend/web/src/features/queue/components/ClearModelCacheButton.tsx
@@ -1,12 +1,10 @@
import { Button } from '@invoke-ai/ui-library';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { toast } from 'features/toast/toast';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useEmptyModelCacheMutation } from 'services/api/endpoints/models';
const ClearModelCacheButton = () => {
- const isModelCacheEnabled = useFeatureStatus('modelCache');
const [emptyModelCache, { isLoading }] = useEmptyModelCacheMutation();
const { t } = useTranslation();
@@ -25,10 +23,6 @@ const ClearModelCacheButton = () => {
}
}, [emptyModelCache, t]);
- if (!isModelCacheEnabled) {
- return <>>;
- }
-
return (
- {allowPrivateStylePresets ? : }
+
) => {
- const { field } = useController(props);
- const stylePresetModalState = useStore($stylePresetModalState);
- const { t } = useTranslation();
-
- const onChange = useCallback(
- (v) => {
- if (v) {
- field.onChange(v.value);
- }
- },
- [field]
- );
-
- const value = useMemo(() => {
- return OPTIONS.find((opt) => opt.value === field.value);
- }, [field.value]);
-
- return (
-
- {t('stylePresets.type')}
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/stylePresets/components/StylePresetImage.tsx b/invokeai/frontend/web/src/features/stylePresets/components/StylePresetImage.tsx
index 3a960147719..a258b60efee 100644
--- a/invokeai/frontend/web/src/features/stylePresets/components/StylePresetImage.tsx
+++ b/invokeai/frontend/web/src/features/stylePresets/components/StylePresetImage.tsx
@@ -1,6 +1,4 @@
import { Flex, Icon, Image, Tooltip } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $crossOrigin } from 'app/store/nanostores/authToken';
import { typedMemo } from 'common/util/typedMemo';
import { PiImage } from 'react-icons/pi';
@@ -8,8 +6,6 @@ const IMAGE_THUMBNAIL_SIZE = '40px';
const FALLBACK_ICON_SIZE = '24px';
const StylePresetImage = ({ presetImageUrl, imageWidth }: { presetImageUrl: string | null; imageWidth?: number }) => {
- const crossOrigin = useStore($crossOrigin);
-
return (
{
const searchTerm = useAppSelector(selectStylePresetSearchTerm);
- const allowPrivateStylePresets = useAppSelector(selectAllowPrivateStylePresets);
const { data } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
const filteredData =
@@ -33,8 +31,6 @@ export const StylePresetMenu = () => {
) => {
if (preset.type === 'default') {
acc.defaultPresets.push(preset);
- } else if (preset.type === 'project') {
- acc.sharedPresets.push(preset);
} else {
acc.presets.push(preset);
}
@@ -64,9 +60,6 @@ export const StylePresetMenu = () => {
- {allowPrivateStylePresets && (
-
- )}
);
diff --git a/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx b/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx
index bb2eb14a687..dd729f067c2 100644
--- a/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx
+++ b/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx
@@ -17,7 +17,7 @@ import {
} from '@invoke-ai/ui-library';
import { deepClone } from 'common/util/deepClone';
import DataViewer from 'features/gallery/components/ImageMetadataViewer/DataViewer';
-import { discordLink, githubLink, websiteLink } from 'features/system/store/constants';
+import { discordLink, githubLink } from 'features/system/store/constants';
import InvokeLogoYellow from 'public/assets/images/invoke-tag-lrg.svg';
import type { ReactElement } from 'react';
import { cloneElement, memo, useMemo } from 'react';
@@ -82,7 +82,6 @@ const AboutModal = ({ children }: AboutModalProps) => {
{t('common.aboutHeading')}
{t('common.aboutDesc')}
-
diff --git a/invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts b/invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts
index 3582baec50e..981c4d0f511 100644
--- a/invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts
+++ b/invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts
@@ -1,4 +1,3 @@
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useMemo } from 'react';
import { type HotkeyCallback, type Options, useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
@@ -32,8 +31,6 @@ const formatKeysForPlatform = (keys: string[], isMacOS: boolean): string[][] =>
export const useHotkeyData = (): HotkeysData => {
const { t } = useTranslation();
- const isModelManagerEnabled = useFeatureStatus('modelManager');
- const isVideoEnabled = useFeatureStatus('video');
const isMacOS = useMemo(() => {
return navigator.userAgent.toLowerCase().includes('mac');
}, []);
@@ -81,31 +78,14 @@ export const useHotkeyData = (): HotkeysData => {
addHotkey('app', 'selectGenerateTab', ['1']);
addHotkey('app', 'selectCanvasTab', ['2']);
addHotkey('app', 'selectUpscalingTab', ['3']);
+ addHotkey('app', 'selectWorkflowsTab', ['4']);
+ addHotkey('app', 'selectModelsTab', ['5']);
+ addHotkey('app', 'selectQueueTab', ['6']);
+
// Prompt/history navigation (when prompt textarea is focused)
addHotkey('app', 'promptHistoryPrev', ['alt+up']);
addHotkey('app', 'promptHistoryNext', ['alt+down']);
- if (isVideoEnabled) {
- addHotkey('app', 'selectVideoTab', ['4']);
- addHotkey('app', 'selectWorkflowsTab', ['5']);
- if (isModelManagerEnabled) {
- addHotkey('app', 'selectModelsTab', ['6']);
- addHotkey('app', 'selectQueueTab', ['7']);
- } else {
- addHotkey('app', 'selectModelsTab', ['DISABLED'], false);
- addHotkey('app', 'selectQueueTab', ['6']);
- }
- } else {
- addHotkey('app', 'selectVideoTab', ['DISABLED'], false);
- addHotkey('app', 'selectWorkflowsTab', ['4']);
- if (isModelManagerEnabled) {
- addHotkey('app', 'selectModelsTab', ['5']);
- addHotkey('app', 'selectQueueTab', ['6']);
- } else {
- addHotkey('app', 'selectModelsTab', ['DISABLED'], false);
- addHotkey('app', 'selectQueueTab', ['5']);
- }
- }
addHotkey('app', 'focusPrompt', ['alt+a']);
addHotkey('app', 'toggleLeftPanel', ['t', 'o']);
addHotkey('app', 'toggleRightPanel', ['g']);
@@ -188,7 +168,7 @@ export const useHotkeyData = (): HotkeysData => {
addHotkey('gallery', 'starImage', ['.']);
return data;
- }, [isMacOS, isVideoEnabled, isModelManagerEnabled, t]);
+ }, [isMacOS, t]);
return hotkeysData;
};
diff --git a/invokeai/frontend/web/src/features/system/components/InvokeAILogoComponent.tsx b/invokeai/frontend/web/src/features/system/components/InvokeAILogoComponent.tsx
index f5817fc658f..0716a73c8c4 100644
--- a/invokeai/frontend/web/src/features/system/components/InvokeAILogoComponent.tsx
+++ b/invokeai/frontend/web/src/features/system/components/InvokeAILogoComponent.tsx
@@ -1,6 +1,4 @@
import { Image, Text, Tooltip } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $logo } from 'app/store/nanostores/logo';
import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg';
import { memo, useMemo, useRef } from 'react';
import { useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
@@ -8,7 +6,6 @@ import { useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
const InvokeAILogoComponent = () => {
const { data: appVersion } = useGetAppVersionQuery();
const ref = useRef(null);
- const logoOverride = useStore($logo);
const tooltip = useMemo(() => {
if (appVersion) {
return v{appVersion.version};
@@ -16,10 +13,6 @@ const InvokeAILogoComponent = () => {
return null;
}, [appVersion]);
- if (logoOverride) {
- return logoOverride;
- }
-
return (
{
const { t } = useTranslation();
const dispatch = useAppDispatch();
const language = useAppSelector(selectLanguage);
- const isLocalizationEnabled = useFeatureStatus('localization');
const value = useMemo(() => options.find((o) => o.value === language), [language]);
@@ -56,7 +54,7 @@ export const SettingsLanguageSelect = memo(() => {
[dispatch]
);
return (
-
+
{t('common.languagePickerLabel')}
diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx
index a03f67e585d..bbd7103d6c2 100644
--- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx
+++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx
@@ -11,33 +11,18 @@ import {
} from '@invoke-ai/ui-library';
import AboutModal from 'features/system/components/AboutModal/AboutModal';
import HotkeysModal from 'features/system/components/HotkeysModal/HotkeysModal';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { discordLink, githubLink } from 'features/system/store/constants';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
-import {
- PiBugBeetleBold,
- PiGearSixFill,
- PiInfoBold,
- PiKeyboardBold,
- PiShareNetworkFill,
- PiToggleRightFill,
- PiUsersBold,
-} from 'react-icons/pi';
+import { PiBugBeetleBold, PiGearSixFill, PiInfoBold, PiKeyboardBold, PiToggleRightFill } from 'react-icons/pi';
import { RiDiscordFill, RiGithubFill } from 'react-icons/ri';
import SettingsModal from './SettingsModal';
-import { SettingsUpsellMenuItem } from './SettingsUpsellMenuItem';
const SettingsMenu = () => {
const { t } = useTranslation();
const { isOpen, onOpen, onClose } = useDisclosure();
useGlobalMenuClose(onClose);
- const isBugLinkEnabled = useFeatureStatus('bugLink');
- const isDiscordLinkEnabled = useFeatureStatus('discordLink');
- const isGithubLinkEnabled = useFeatureStatus('githubLink');
- const isAboutModalEnabled = useFeatureStatus('aboutModal');
-
return (
diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx
deleted file mode 100644
index c5f9a13c2b3..00000000000
--- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx
+++ /dev/null
@@ -1,19 +0,0 @@
-import { Flex, Icon, MenuItem, Text, Tooltip } from '@invoke-ai/ui-library';
-import type { ReactElement } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiArrowUpBold } from 'react-icons/pi';
-
-export const SettingsUpsellMenuItem = ({ menuText, menuIcon }: { menuText: string; menuIcon: ReactElement }) => {
- const { t } = useTranslation();
-
- return (
-
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts b/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts
index 26ad8a78ad7..c9f1e524bfa 100644
--- a/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts
+++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts
@@ -12,12 +12,11 @@ type UseClearIntermediatesReturn = {
refetchIntermediatesCount: () => void;
};
-export const useClearIntermediates = (shouldShowClearIntermediates: boolean): UseClearIntermediatesReturn => {
+export const useClearIntermediates = (): UseClearIntermediatesReturn => {
const { t } = useTranslation();
const { data: intermediatesCount, refetch: refetchIntermediatesCount } = useGetIntermediatesCountQuery(undefined, {
refetchOnMountOrArgChange: true,
- skip: !shouldShowClearIntermediates,
});
const [_clearIntermediates, { isLoading }] = useClearIntermediatesMutation();
diff --git a/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx b/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx
index d97a0bed341..4f037224a62 100644
--- a/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx
+++ b/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx
@@ -1,17 +1,11 @@
import { ExternalLink, Flex, Spacer, Text } from '@invoke-ai/ui-library';
-import { useAppDispatch } from 'app/store/storeHooks';
import type { VideoData } from 'features/system/components/VideosModal/data';
-import { videoModalLinkClicked } from 'features/system/store/actions';
-import { memo, useCallback } from 'react';
+import { memo } from 'react';
import { useTranslation } from 'react-i18next';
export const VideoCard = memo(({ video }: { video: VideoData }) => {
const { t } = useTranslation();
- const dispatch = useAppDispatch();
const { tKey, link } = video;
- const handleLinkClick = useCallback(() => {
- dispatch(videoModalLinkClicked(t(`supportVideos.videos.${tKey}.title`)));
- }, [dispatch, t, tKey]);
return (
@@ -20,7 +14,7 @@ export const VideoCard = memo(({ video }: { video: VideoData }) => {
{t(`supportVideos.videos.${tKey}.title`)}
-
+
{t(`supportVideos.videos.${tKey}.description`)}
diff --git a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx
index 2db109479f5..818f531820e 100644
--- a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx
+++ b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx
@@ -10,7 +10,6 @@ import {
ModalOverlay,
Text,
} from '@invoke-ai/ui-library';
-import { useAppDispatch } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { buildUseDisclosure } from 'common/hooks/useBoolean';
import {
@@ -19,62 +18,36 @@ import {
supportVideos,
} from 'features/system/components/VideosModal/data';
import { VideoCardList } from 'features/system/components/VideosModal/VideoCardList';
-import { videoModalLinkClicked } from 'features/system/store/actions';
import { discordLink } from 'features/system/store/constants';
-import { memo, useCallback } from 'react';
+import { memo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
export const [useVideosModal] = buildUseDisclosure(false);
const GettingStartedPlaylistLink = () => {
- const dispatch = useAppDispatch();
- const handleLinkClick = useCallback(() => {
- dispatch(videoModalLinkClicked('Getting Started playlist'));
- }, [dispatch]);
-
return (
);
};
const StudioSessionsPlaylistLink = () => {
- const dispatch = useAppDispatch();
- const handleLinkClick = useCallback(() => {
- dispatch(videoModalLinkClicked('Studio Sessions playlist'));
- }, [dispatch]);
-
return (
);
};
const DiscordLink = () => {
- const dispatch = useAppDispatch();
- const handleLinkClick = useCallback(() => {
- dispatch(videoModalLinkClicked('Discord'));
- }, [dispatch]);
-
- return (
-
- );
+ return ;
};
const components = {
diff --git a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx
index 3b5169a14ae..bf99afdbc8e 100644
--- a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx
+++ b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx
@@ -1,21 +1,17 @@
import { IconButton } from '@invoke-ai/ui-library';
-import { useAppDispatch } from 'app/store/storeHooks';
import { useVideosModal } from 'features/system/components/VideosModal/VideosModal';
-import { videoModalOpened } from 'features/system/store/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiYoutubeLogoFill } from 'react-icons/pi';
export const VideosModalButton = memo(() => {
const { t } = useTranslation();
- const dispatch = useAppDispatch();
const videosModal = useVideosModal();
const onClickOpen = useCallback(() => {
- dispatch(videoModalOpened());
videosModal.open();
- }, [videosModal, dispatch]);
+ }, [videosModal]);
return (
{
- const selectIsFeatureEnabled = useMemo(
- () =>
- createSelector(selectConfigSlice, (config) => {
- return !(
- config.disabledFeatures.includes(feature as AppFeature) ||
- config.disabledSDFeatures.includes(feature as SDFeature) ||
- config.disabledTabs.includes(feature as TabName)
- );
- }),
- [feature]
- );
-
- const isFeatureEnabled = useAppSelector(selectIsFeatureEnabled);
-
- return isFeatureEnabled;
-};
diff --git a/invokeai/frontend/web/src/features/system/store/actions.ts b/invokeai/frontend/web/src/features/system/store/actions.ts
deleted file mode 100644
index 4be0a516256..00000000000
--- a/invokeai/frontend/web/src/features/system/store/actions.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { createAction } from '@reduxjs/toolkit';
-
-export const videoModalLinkClicked = createAction('system/videoModalLinkClicked');
-export const videoModalOpened = createAction('system/videoModalOpened');
diff --git a/invokeai/frontend/web/src/features/system/store/configSelectors.ts b/invokeai/frontend/web/src/features/system/store/configSelectors.ts
deleted file mode 100644
index 295418b489d..00000000000
--- a/invokeai/frontend/web/src/features/system/store/configSelectors.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { createSelector } from '@reduxjs/toolkit';
-import { selectConfigSlice } from 'features/system/store/configSlice';
-
-export const selectAllowPrivateBoards = createSelector(selectConfigSlice, (config) => config.allowPrivateBoards);
diff --git a/invokeai/frontend/web/src/features/system/store/configSlice.ts b/invokeai/frontend/web/src/features/system/store/configSlice.ts
deleted file mode 100644
index a50c478355b..00000000000
--- a/invokeai/frontend/web/src/features/system/store/configSlice.ts
+++ /dev/null
@@ -1,101 +0,0 @@
-import type { PayloadAction, Selector } from '@reduxjs/toolkit';
-import { createSelector, createSlice } from '@reduxjs/toolkit';
-import type { RootState } from 'app/store/store';
-import type { SliceConfig } from 'app/store/types';
-import type { PartialAppConfig } from 'app/types/invokeai';
-import { getDefaultAppConfig, zAppConfig } from 'app/types/invokeai';
-import { merge } from 'es-toolkit/compat';
-import z from 'zod';
-
-const zConfigState = z.object({
- ...zAppConfig.shape,
- didLoad: z.boolean(),
-});
-type ConfigState = z.infer;
-
-const getInitialState = (): ConfigState => ({
- ...getDefaultAppConfig(),
- didLoad: false,
-});
-
-const slice = createSlice({
- name: 'config',
- initialState: getInitialState(),
- reducers: {
- configChanged: (state, action: PayloadAction) => {
- // Handle disabledTabs specially - if provided, it should completely replace the default array
- if (action.payload.disabledTabs !== undefined) {
- state.disabledTabs = action.payload.disabledTabs;
- }
-
- // Merge the rest of the config normally
- merge(state, action.payload);
- state.didLoad = true;
- },
- },
-});
-
-export const { configChanged } = slice.actions;
-
-export const configSliceConfig: SliceConfig = {
- slice,
- schema: zConfigState,
- getInitialState,
-};
-
-export const selectConfigSlice = (state: RootState) => state.config;
-const createConfigSelector = (selector: Selector) => createSelector(selectConfigSlice, selector);
-
-export const selectWidthConfig = createConfigSelector((config) => config.sd.width);
-export const selectHeightConfig = createConfigSelector((config) => config.sd.height);
-export const selectStepsConfig = createConfigSelector((config) => config.sd.steps);
-export const selectCFGScaleConfig = createConfigSelector((config) => config.sd.guidance);
-export const selectGuidanceConfig = createConfigSelector((config) => config.flux.guidance);
-export const selectCLIPSkipConfig = createConfigSelector((config) => config.sd.clipSkip);
-export const selectCFGRescaleMultiplierConfig = createConfigSelector((config) => config.sd.cfgRescaleMultiplier);
-export const selectCanvasCoherenceEdgeSizeConfig = createConfigSelector((config) => config.sd.canvasCoherenceEdgeSize);
-export const selectMaskBlurConfig = createConfigSelector((config) => config.sd.maskBlur);
-export const selectInfillPatchmatchDownscaleSizeConfig = createConfigSelector(
- (config) => config.sd.infillPatchmatchDownscaleSize
-);
-export const selectInfillTileSizeConfig = createConfigSelector((config) => config.sd.infillTileSize);
-export const selectImg2imgStrengthConfig = createConfigSelector((config) => config.sd.img2imgStrength);
-export const selectMaxPromptsConfig = createConfigSelector((config) => config.sd.dynamicPrompts.maxPrompts);
-export const selectIterationsConfig = createConfigSelector((config) => config.sd.iterations);
-
-export const selectMaxUpscaleDimension = createConfigSelector((config) => config.maxUpscaleDimension);
-export const selectAllowPrivateStylePresets = createConfigSelector((config) => config.allowPrivateStylePresets);
-export const selectWorkflowFetchDebounce = createConfigSelector((config) => config.workflowFetchDebounce ?? 300);
-export const selectMetadataFetchDebounce = createConfigSelector((config) => config.metadataFetchDebounce ?? 300);
-
-export const selectIsModelsTabDisabled = createConfigSelector((config) => config.disabledTabs.includes('models'));
-export const selectIsClientSideUploadEnabled = createConfigSelector((config) => config.allowClientSideUpload);
-export const selectAllowPublishWorkflows = createConfigSelector((config) => config.allowPublishWorkflows);
-export const selectAllowPromptExpansion = createConfigSelector((config) => config.allowPromptExpansion);
-export const selectAllowVideo = createConfigSelector((config) => config.allowVideo);
-
-export const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
-export const selectShouldShowCredits = createConfigSelector((config) => config.shouldShowCredits);
-const selectDisabledTabs = createConfigSelector((config) => config.disabledTabs);
-const selectDidLoad = createConfigSelector((config) => config.didLoad);
-export const selectWithGenerateTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('generate') : false
-);
-export const selectWithCanvasTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('canvas') : false
-);
-export const selectWithUpscalingTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('upscaling') : false
-);
-export const selectWithWorkflowsTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('workflows') : false
-);
-export const selectWithModelsTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('models') : false
-);
-export const selectWithQueueTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('queue') : false
-);
-export const selectWithVideoTab = createSelector(selectDidLoad, selectDisabledTabs, (didLoad, disabledTabs) =>
- didLoad ? !disabledTabs.includes('video') : false
-);
diff --git a/invokeai/frontend/web/src/features/system/store/constants.ts b/invokeai/frontend/web/src/features/system/store/constants.ts
index 0ca2d24129e..882609e0ac7 100644
--- a/invokeai/frontend/web/src/features/system/store/constants.ts
+++ b/invokeai/frontend/web/src/features/system/store/constants.ts
@@ -1,4 +1,3 @@
export const githubLink = 'http://github.com/invoke-ai/InvokeAI';
export const githubIssuesLink = 'https://github.com/invoke-ai/InvokeAI/issues';
export const discordLink = 'https://discord.gg/ZmtBAhwWhy';
-export const websiteLink = 'https://www.invoke.com/';
diff --git a/invokeai/frontend/web/src/features/toast/ErrorToastDescription.tsx b/invokeai/frontend/web/src/features/toast/ErrorToastDescription.tsx
index 50e03f770aa..056d1c68c26 100644
--- a/invokeai/frontend/web/src/features/toast/ErrorToastDescription.tsx
+++ b/invokeai/frontend/web/src/features/toast/ErrorToastDescription.tsx
@@ -1,64 +1,36 @@
-import { Flex, IconButton, Text } from '@invoke-ai/ui-library';
-import { useClipboard } from 'common/hooks/useClipboard';
+import { Flex, Text } from '@invoke-ai/ui-library';
import { ExternalLink } from 'features/gallery/components/ImageViewer/NoContentForViewer';
import { t } from 'i18next';
-import { useCallback, useMemo } from 'react';
-import { Trans, useTranslation } from 'react-i18next';
-import { PiCopyBold } from 'react-icons/pi';
+import { useMemo } from 'react';
+import { Trans } from 'react-i18next';
-type DescriptionProps = { errorType: string; errorMessage?: string | null; sessionId: string; isLocal: boolean };
+type DescriptionProps = { errorType: string; errorMessage?: string | null };
export const getTitle = (errorType: string) => {
return errorType === 'OutOfMemoryError' ? t('toast.outOfMemoryError') : t('toast.serverError');
};
-export default function ErrorToastDescription({ errorType, isLocal, sessionId, errorMessage }: DescriptionProps) {
- const { t } = useTranslation();
- const clipboard = useClipboard();
-
+export default function ErrorToastDescription({ errorType, errorMessage }: DescriptionProps) {
const description = useMemo(() => {
if (errorType === 'OutOfMemoryError') {
- if (isLocal) {
- return (
- ,
- }}
- />
- );
- } else {
- return t('toast.outOfMemoryErrorDesc');
- }
+ return (
+ ,
+ }}
+ />
+ );
} else if (errorMessage) {
return `${errorType}: ${errorMessage}`;
}
- }, [errorMessage, errorType, isLocal, t]);
-
- const copySessionId = useCallback(() => clipboard.writeText(sessionId), [sessionId, clipboard]);
+ }, [errorMessage, errorType]);
return (
- {description && (
-
- {description}
-
- )}
- {!isLocal && (
-
-
- {t('toast.sessionRef', { sessionId })}
-
- }
- onClick={copySessionId}
- variant="ghost"
- sx={{ svg: { fill: 'base.50' } }}
- />
-
- )}
+
+ {description}
+
);
}
diff --git a/invokeai/frontend/web/src/features/toast/toast.ts b/invokeai/frontend/web/src/features/toast/toast.ts
index 210725b69e7..6f05476613d 100644
--- a/invokeai/frontend/web/src/features/toast/toast.ts
+++ b/invokeai/frontend/web/src/features/toast/toast.ts
@@ -9,7 +9,7 @@ export const toastApi = createStandaloneToast({
}).toast;
// Slightly modified version of UseToastOptions
-export type ToastConfig = Omit & {
+type ToastConfig = Omit & {
// Only string - Chakra allows numbers
id?: string;
};
diff --git a/invokeai/frontend/web/src/features/ui/README.md b/invokeai/frontend/web/src/features/ui/README.md
new file mode 100644
index 00000000000..f4c262d425d
--- /dev/null
+++ b/invokeai/frontend/web/src/features/ui/README.md
@@ -0,0 +1,26 @@
+# UI/Layout
+
+We use https://github.com/mathuo/dockview for layout. This library supports resizable and dockable panels. Users can drag and drop panels to rearrange them.
+
+The intention when adopting this library was to allow users to create their own custom layouts and save them. However, this feature is not yet implemented and each tab only has a predefined layout.
+
+This works well, but it _is_ fairly complex. You can see that we've needed to write a fairly involved API to manage the layouts: invokeai/frontend/web/src/features/ui/layouts/navigation-api.ts
+
+And the layouts themselves are awkward to define, especially when compared to plain JSX: invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx
+
+This complexity may or may not be worth it.
+
+## Previous approach
+
+Previously we used https://github.com/bvaughn/react-resizable-panels and simple JSX components.
+
+This library is great except it doesn't support absolute size constraints, only relative/percentage constraints. We had a brittle abstraction layer on top of it to try to enforce minimum pixel sizes for panels but it was janky and had FP precision issues causing drifting sizes.
+
+It also doesn't support dockable panels.
+
+## Future possibilities
+
+1. Continue with dockview and implement custom layout saving/loading. We experimented with this and it was _really_ nice. We defined a component for each panel type and use react context to manage state. But we thought that it would be confusing for most users, so we flagged it for a future iteration and instead shipped with predefined layouts.
+2. Switch to a simpler layout library or roll our own.
+
+In hindsight, we should have skipped dockview and found something else that was simpler until we were ready to invest in custom layouts.
diff --git a/invokeai/frontend/web/src/features/ui/components/AppContent.tsx b/invokeai/frontend/web/src/features/ui/components/AppContent.tsx
index 5650e316bfb..d3ab650d7b8 100644
--- a/invokeai/frontend/web/src/features/ui/components/AppContent.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/AppContent.tsx
@@ -5,15 +5,6 @@ import { Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import Loading from 'common/components/Loading/Loading';
-import {
- selectWithCanvasTab,
- selectWithGenerateTab,
- selectWithModelsTab,
- selectWithQueueTab,
- selectWithUpscalingTab,
- selectWithVideoTab,
- selectWithWorkflowsTab,
-} from 'features/system/store/configSlice';
import { VerticalNavBar } from 'features/ui/components/VerticalNavBar';
import { CanvasTabAutoLayout } from 'features/ui/layouts/canvas-tab-auto-layout';
import { GenerateTabAutoLayout } from 'features/ui/layouts/generate-tab-auto-layout';
@@ -21,7 +12,6 @@ import { ModelsTabAutoLayout } from 'features/ui/layouts/models-tab-auto-layout'
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { QueueTabAutoLayout } from 'features/ui/layouts/queue-tab-auto-layout';
import { UpscalingTabAutoLayout } from 'features/ui/layouts/upscaling-tab-auto-layout';
-import { VideoTabAutoLayout } from 'features/ui/layouts/video-tab-auto-layout';
import { WorkflowsTabAutoLayout } from 'features/ui/layouts/workflows-tab-auto-layout';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
@@ -38,23 +28,15 @@ AppContent.displayName = 'AppContent';
const TabContent = memo(() => {
const tab = useAppSelector(selectActiveTab);
- const withGenerateTab = useAppSelector(selectWithGenerateTab);
- const withCanvasTab = useAppSelector(selectWithCanvasTab);
- const withUpscalingTab = useAppSelector(selectWithUpscalingTab);
- const withWorkflowsTab = useAppSelector(selectWithWorkflowsTab);
- const withModelsTab = useAppSelector(selectWithModelsTab);
- const withQueueTab = useAppSelector(selectWithQueueTab);
- const withVideoTab = useAppSelector(selectWithVideoTab);
return (
- {withGenerateTab && tab === 'generate' && }
- {withCanvasTab && tab === 'canvas' && }
- {withUpscalingTab && tab === 'upscaling' && }
- {withWorkflowsTab && tab === 'workflows' && }
- {withModelsTab && tab === 'models' && }
- {withQueueTab && tab === 'queue' && }
- {withVideoTab && tab === 'video' && }
+ {tab === 'generate' && }
+ {tab === 'canvas' && }
+ {tab === 'upscaling' && }
+ {tab === 'workflows' && }
+ {tab === 'models' && }
+ {tab === 'queue' && }
);
diff --git a/invokeai/frontend/web/src/features/ui/components/Notifications.tsx b/invokeai/frontend/web/src/features/ui/components/Notifications.tsx
index 8a08b5086db..e1d32429af4 100644
--- a/invokeai/frontend/web/src/features/ui/components/Notifications.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/Notifications.tsx
@@ -12,11 +12,7 @@ import {
Portal,
Text,
} from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { createSelector } from '@reduxjs/toolkit';
-import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { selectConfigSlice } from 'features/system/store/configSlice';
import { shouldShowNotificationChanged } from 'features/ui/store/uiSlice';
import InvokeSymbol from 'public/assets/images/invoke-favicon.png';
import { useCallback } from 'react';
@@ -26,20 +22,16 @@ import { useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
import { WhatsNew } from './WhatsNew';
-const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
-
export const Notifications = () => {
const { t } = useTranslation();
- const didStudioInit = useStore($didStudioInit);
const dispatch = useAppDispatch();
const shouldShowNotification = useAppSelector((s) => s.ui.shouldShowNotificationV2);
const resetIndicator = useCallback(() => {
dispatch(shouldShowNotificationChanged(false));
}, [dispatch]);
const { data } = useGetAppVersionQuery();
- const isLocal = useAppSelector(selectIsLocal);
- if (!data || !didStudioInit) {
+ if (!data) {
return null;
}
@@ -63,12 +55,7 @@ export const Notifications = () => {
{t('whatsNew.whatsNewInInvoke')}
- {!!data.version.length &&
- (isLocal ? (
- {`v${data.version}`}
- ) : (
- {data.version}
- ))}
+ {`v${data.version}`}
diff --git a/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelCanvas.tsx b/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelCanvas.tsx
index 0864557ec9f..ddd75d91cf6 100644
--- a/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelCanvas.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelCanvas.tsx
@@ -2,7 +2,7 @@ import { Box, Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
-import { selectIsApiBaseModel, selectIsCogView4, selectIsSDXL } from 'features/controlLayers/store/paramsSlice';
+import { selectIsCogView4, selectIsSDXL } from 'features/controlLayers/store/paramsSlice';
import { Prompts } from 'features/parameters/components/Prompts/Prompts';
import { AdvancedSettingsAccordion } from 'features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion';
import { CompositingSettingsAccordion } from 'features/settingsAccordions/components/CompositingSettingsAccordion/CompositingSettingsAccordion';
@@ -26,8 +26,6 @@ export const ParametersPanelCanvas = memo(() => {
const isCogview4 = useAppSelector(selectIsCogView4);
const isStylePresetsMenuOpen = useStore($isStylePresetsMenuOpen);
- const isApiModel = useAppSelector(selectIsApiBaseModel);
-
return (
@@ -45,9 +43,9 @@ export const ParametersPanelCanvas = memo(() => {
- {!isApiModel && }
+
{isSDXL && }
- {!isCogview4 && !isApiModel && }
+ {!isCogview4 && }
diff --git a/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelGenerate.tsx b/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelGenerate.tsx
index 8265e77801e..f955f6793ca 100644
--- a/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelGenerate.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelGenerate.tsx
@@ -2,7 +2,7 @@ import { Box, Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
-import { selectIsApiBaseModel, selectIsCogView4, selectIsSDXL } from 'features/controlLayers/store/paramsSlice';
+import { selectIsCogView4, selectIsSDXL } from 'features/controlLayers/store/paramsSlice';
import { Prompts } from 'features/parameters/components/Prompts/Prompts';
import { AdvancedSettingsAccordion } from 'features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion';
import { GenerationSettingsAccordion } from 'features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion';
@@ -25,8 +25,6 @@ export const ParametersPanelGenerate = memo(() => {
const isCogview4 = useAppSelector(selectIsCogView4);
const isStylePresetsMenuOpen = useStore($isStylePresetsMenuOpen);
- const isApiModel = useAppSelector(selectIsApiBaseModel);
-
return (
@@ -45,7 +43,7 @@ export const ParametersPanelGenerate = memo(() => {
{isSDXL && }
- {!isCogview4 && !isApiModel && }
+ {!isCogview4 && }
diff --git a/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelVideo.tsx b/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelVideo.tsx
deleted file mode 100644
index 7319956c542..00000000000
--- a/invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelVideo.tsx
+++ /dev/null
@@ -1,45 +0,0 @@
-import { Box, Flex } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
-import { Prompts } from 'features/parameters/components/Prompts/Prompts';
-import { VideoSettingsAccordion } from 'features/settingsAccordions/components/VideoSettingsAccordion/VideoSettingsAccordion';
-import { StylePresetMenu } from 'features/stylePresets/components/StylePresetMenu';
-import { StylePresetMenuTrigger } from 'features/stylePresets/components/StylePresetMenuTrigger';
-import { $isStylePresetsMenuOpen } from 'features/stylePresets/store/stylePresetSlice';
-import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
-import type { CSSProperties } from 'react';
-import { memo } from 'react';
-
-const overlayScrollbarsStyles: CSSProperties = {
- height: '100%',
- width: '100%',
-};
-
-export const ParametersPanelVideo = memo(() => {
- const isStylePresetsMenuOpen = useStore($isStylePresetsMenuOpen);
-
- return (
-
-
-
-
- {isStylePresetsMenuOpen && (
-
-
-
-
-
- )}
-
-
-
-
-
-
-
-
-
- );
-});
-
-ParametersPanelVideo.displayName = 'ParametersPanelVideo';
diff --git a/invokeai/frontend/web/src/features/ui/components/VerticalNavBar.tsx b/invokeai/frontend/web/src/features/ui/components/VerticalNavBar.tsx
index 56271928cf6..4d2696c2e3f 100644
--- a/invokeai/frontend/web/src/features/ui/components/VerticalNavBar.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/VerticalNavBar.tsx
@@ -1,20 +1,8 @@
import { Divider, Flex, Spacer } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
-import { useAppSelector } from 'app/store/storeHooks';
import InvokeAILogoComponent from 'features/system/components/InvokeAILogoComponent';
import SettingsMenu from 'features/system/components/SettingsModal/SettingsMenu';
import StatusIndicator from 'features/system/components/StatusIndicator';
import { VideosModalButton } from 'features/system/components/VideosModal/VideosModalButton';
-import {
- selectWithCanvasTab,
- selectWithGenerateTab,
- selectWithModelsTab,
- selectWithQueueTab,
- selectWithUpscalingTab,
- selectWithVideoTab,
- selectWithWorkflowsTab,
-} from 'features/system/store/configSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import {
@@ -24,7 +12,6 @@ import {
PiFrameCornersBold,
PiQueueBold,
PiTextAaBold,
- PiVideoBold,
} from 'react-icons/pi';
import { Notifications } from './Notifications';
@@ -32,34 +19,29 @@ import { TabButton } from './TabButton';
export const VerticalNavBar = memo(() => {
const { t } = useTranslation();
- const customNavComponent = useStore($customNavComponent);
- const withGenerateTab = useAppSelector(selectWithGenerateTab);
- const withCanvasTab = useAppSelector(selectWithCanvasTab);
- const withUpscalingTab = useAppSelector(selectWithUpscalingTab);
- const withWorkflowsTab = useAppSelector(selectWithWorkflowsTab);
- const withModelsTab = useAppSelector(selectWithModelsTab);
- const withQueueTab = useAppSelector(selectWithQueueTab);
- const withVideoTab = useAppSelector(selectWithVideoTab);
return (
+
- {withGenerateTab && } label={t('ui.tabs.generate')} />}
- {withCanvasTab && } label={t('ui.tabs.canvas')} />}
- {withUpscalingTab && } label={t('ui.tabs.upscaling')} />}
- {withVideoTab && } label={t('ui.tabs.video')} />}
- {withWorkflowsTab && } label={t('ui.tabs.workflows')} />}
+ } label={t('ui.tabs.generate')} />
+ } label={t('ui.tabs.canvas')} />
+ } label={t('ui.tabs.upscaling')} />
+ } label={t('ui.tabs.workflows')} />
+
+
- {withModelsTab && } label={t('ui.tabs.models')} />}
- {withQueueTab && } label={t('ui.tabs.queue')} />}
+ } label={t('ui.tabs.models')} />
+ } label={t('ui.tabs.queue')} />
+
- {customNavComponent ? customNavComponent : }
+
);
});
diff --git a/invokeai/frontend/web/src/features/ui/components/WhatsNew.tsx b/invokeai/frontend/web/src/features/ui/components/WhatsNew.tsx
index b17091d5b51..bbee1bfe838 100644
--- a/invokeai/frontend/web/src/features/ui/components/WhatsNew.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/WhatsNew.tsx
@@ -1,16 +1,9 @@
import { ExternalLink, Flex, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { createSelector } from '@reduxjs/toolkit';
-import { $whatsNew } from 'app/store/nanostores/whatsNew';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectConfigSlice } from 'features/system/store/configSlice';
import type { ReactNode } from 'react';
import { useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import { useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
-const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
-
const components = {
StrongComponent: ,
};
@@ -18,18 +11,8 @@ const components = {
export const WhatsNew = () => {
const { t } = useTranslation();
const { data } = useGetAppVersionQuery();
- const isLocal = useAppSelector(selectIsLocal);
- const whatsNew = useStore($whatsNew);
const items = useMemo(() => {
- if (whatsNew) {
- return whatsNew;
- }
-
- if (data?.highlights?.length) {
- return data.highlights.map((highlight, index) => {highlight});
- }
-
const tKeys = t('whatsNew.items', {
returnObjects: true,
});
@@ -39,7 +22,7 @@ export const WhatsNew = () => {
));
- }, [data?.highlights, t, whatsNew]);
+ }, [t]);
return (
@@ -49,11 +32,7 @@ export const WhatsNew = () => {
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.readReleaseNotes')}
- href={
- isLocal
- ? `https://github.com/invoke-ai/InvokeAI/releases/tag/v${data?.version}`
- : 'https://support.invoke.ai/support/solutions/articles/151000178246'
- }
+ href={`https://github.com/invoke-ai/InvokeAI/releases/tag/v${data?.version}`}
/>
= {
workflows: PiFlowArrowBold,
models: PiCubeBold,
queue: PiQueueBold,
- video: PiVideoBold,
};
export const DockviewTabLaunchpad = memo((props: IDockviewPanelHeaderProps) => {
diff --git a/invokeai/frontend/web/src/features/ui/layouts/LaunchpadStartingFrameButton.tsx b/invokeai/frontend/web/src/features/ui/layouts/LaunchpadStartingFrameButton.tsx
deleted file mode 100644
index 2f6d12ba3c8..00000000000
--- a/invokeai/frontend/web/src/features/ui/layouts/LaunchpadStartingFrameButton.tsx
+++ /dev/null
@@ -1,50 +0,0 @@
-import { Flex, Heading, Icon, Text } from '@invoke-ai/ui-library';
-import { useAppStore } from 'app/store/storeHooks';
-import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
-import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
-import { videoFrameFromImageDndTarget } from 'features/dnd/dnd';
-import { DndDropTarget } from 'features/dnd/DndDropTarget';
-import { startingFrameImageChanged } from 'features/parameters/store/videoSlice';
-import { LaunchpadButton } from 'features/ui/layouts/LaunchpadButton';
-import { memo, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiUploadBold, PiVideoBold } from 'react-icons/pi';
-import type { ImageDTO } from 'services/api/types';
-
-const dndTargetData = videoFrameFromImageDndTarget.getData({ frame: 'start' });
-
-export const LaunchpadStartingFrameButton = memo((props: { extraAction?: () => void }) => {
- const { t } = useTranslation();
- const { dispatch } = useAppStore();
-
- const uploadOptions = useMemo(
- () =>
- ({
- onUpload: (imageDTO: ImageDTO) => {
- dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO)));
- props.extraAction?.();
- },
- allowMultiple: false,
- }) as const,
- [dispatch, props]
- );
-
- const uploadApi = useImageUploadButton(uploadOptions);
-
- return (
-
-
-
- {t('ui.launchpad.addStartingFrame.title')}
- {t('ui.launchpad.addStartingFrame.description')}
-
-
-
-
-
-
-
- );
-});
-
-LaunchpadStartingFrameButton.displayName = 'LaunchpadStartingFrameButton';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/VideoLaunchpadPanel.tsx b/invokeai/frontend/web/src/features/ui/layouts/VideoLaunchpadPanel.tsx
deleted file mode 100644
index d7eb9c577c4..00000000000
--- a/invokeai/frontend/web/src/features/ui/layouts/VideoLaunchpadPanel.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-import { Button, Flex, Grid, Text } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $videoUpsellComponent } from 'app/store/nanostores/videoUpsellComponent';
-import { useAppSelector } from 'app/store/storeHooks';
-import { VideoModelPicker } from 'features/settingsAccordions/components/VideoSettingsAccordion/VideoModelPicker';
-import { selectAllowVideo } from 'features/system/store/configSlice';
-import { memo } from 'react';
-import { useTranslation } from 'react-i18next';
-
-import { LaunchpadContainer } from './LaunchpadContainer';
-import { LaunchpadGenerateFromTextButton } from './LaunchpadGenerateFromTextButton';
-import { LaunchpadStartingFrameButton } from './LaunchpadStartingFrameButton';
-
-export const VideoLaunchpadPanel = memo(() => {
- const { t } = useTranslation();
- const isVideoEnabled = useAppSelector(selectAllowVideo);
- const videoUpsellComponent = useStore($videoUpsellComponent);
-
- if (!isVideoEnabled) {
- return {videoUpsellComponent};
- }
-
- return (
-
-
-
-
-
- {t('ui.launchpad.modelGuideText')}{' '}
-
- {t('ui.launchpad.modelGuideLink')}
-
-
-
-
-
-
-
- );
-});
-VideoLaunchpadPanel.displayName = 'VideoLaunchpadPanel';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx b/invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx
deleted file mode 100644
index 0d99be4f963..00000000000
--- a/invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx
+++ /dev/null
@@ -1,16 +0,0 @@
-import { Box, Flex } from '@invoke-ai/ui-library';
-import QueueControls from 'features/queue/components/QueueControls';
-import { ParametersPanelVideo } from 'features/ui/components/ParametersPanels/ParametersPanelVideo';
-import { memo } from 'react';
-
-export const VideoTabLeftPanel = memo(() => {
- return (
-
-
-
-
-
-
- );
-});
-VideoTabLeftPanel.displayName = 'VideoTabLeftPanel';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx
index b622b77e4de..e2cbfe2c5d2 100644
--- a/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx
+++ b/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx
@@ -2,7 +2,7 @@ import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps
import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview';
import { CanvasLayersPanel } from 'features/controlLayers/components/CanvasLayersPanelContent';
import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent';
-import { GalleryPanel } from 'features/gallery/components/Gallery';
+import { GalleryPanel } from 'features/gallery/components/GalleryPanel';
import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel';
import { FloatingCanvasLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons';
import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx
index 81cf2885474..e60c15b5da3 100644
--- a/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx
+++ b/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx
@@ -1,7 +1,7 @@
import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview';
import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview';
import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent';
-import { GalleryPanel } from 'features/gallery/components/Gallery';
+import { GalleryPanel } from 'features/gallery/components/GalleryPanel';
import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel';
import { FloatingLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons';
import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx
index 7820187119c..e4f443148ff 100644
--- a/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx
+++ b/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx
@@ -1,7 +1,7 @@
import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview';
import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview';
import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent';
-import { GalleryPanel } from 'features/gallery/components/Gallery';
+import { GalleryPanel } from 'features/gallery/components/GalleryPanel';
import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel';
import { FloatingLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons';
import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx
deleted file mode 100644
index dbf9b44e4c7..00000000000
--- a/invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx
+++ /dev/null
@@ -1,278 +0,0 @@
-import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview';
-import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview';
-import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent';
-import { GalleryPanel } from 'features/gallery/components/Gallery';
-import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel';
-import { FloatingLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons';
-import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons';
-import type {
- AutoLayoutDockviewComponents,
- AutoLayoutGridviewComponents,
- DockviewPanelParameters,
- GridviewPanelParameters,
- RootLayoutGridviewComponents,
-} from 'features/ui/layouts/auto-layout-context';
-import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context';
-import type { TabName } from 'features/ui/store/uiTypes';
-import { dockviewTheme } from 'features/ui/styles/theme';
-import { t } from 'i18next';
-import { memo, useCallback, useEffect } from 'react';
-
-import { DockviewTab } from './DockviewTab';
-import { DockviewTabLaunchpad } from './DockviewTabLaunchpad';
-import { DockviewTabProgress } from './DockviewTabProgress';
-import { navigationApi } from './navigation-api';
-import { PanelHotkeysLogical } from './PanelHotkeysLogical';
-import {
- BOARD_PANEL_DEFAULT_HEIGHT_PX,
- BOARD_PANEL_MIN_HEIGHT_PX,
- BOARDS_PANEL_ID,
- DOCKVIEW_TAB_ID,
- DOCKVIEW_TAB_LAUNCHPAD_ID,
- DOCKVIEW_TAB_PROGRESS_ID,
- GALLERY_PANEL_DEFAULT_HEIGHT_PX,
- GALLERY_PANEL_ID,
- GALLERY_PANEL_MIN_HEIGHT_PX,
- LAUNCHPAD_PANEL_ID,
- LEFT_PANEL_ID,
- LEFT_PANEL_MIN_SIZE_PX,
- MAIN_PANEL_ID,
- RIGHT_PANEL_ID,
- RIGHT_PANEL_MIN_SIZE_PX,
- SETTINGS_PANEL_ID,
- VIEWER_PANEL_ID,
-} from './shared';
-import { VideoLaunchpadPanel } from './VideoLaunchpadPanel';
-import { VideoTabLeftPanel } from './VideoTabLeftPanel';
-
-const tabComponents = {
- [DOCKVIEW_TAB_ID]: DockviewTab,
- [DOCKVIEW_TAB_PROGRESS_ID]: DockviewTabProgress,
- [DOCKVIEW_TAB_LAUNCHPAD_ID]: DockviewTabLaunchpad,
-};
-
-const mainPanelComponents: AutoLayoutDockviewComponents = {
- [LAUNCHPAD_PANEL_ID]: withPanelContainer(VideoLaunchpadPanel),
- [VIEWER_PANEL_ID]: withPanelContainer(ImageViewerPanel),
-};
-
-const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
- navigationApi.registerContainer(tab, 'main', api, () => {
- const launchpad = api.addPanel({
- id: LAUNCHPAD_PANEL_ID,
- component: LAUNCHPAD_PANEL_ID,
- title: t('ui.panels.launchpad'),
- tabComponent: DOCKVIEW_TAB_LAUNCHPAD_ID,
- params: {
- tab,
- focusRegion: 'launchpad',
- i18nKey: 'ui.panels.launchpad',
- },
- });
-
- api.addPanel({
- id: VIEWER_PANEL_ID,
- component: VIEWER_PANEL_ID,
- title: t('ui.panels.imageViewer'),
- tabComponent: DOCKVIEW_TAB_PROGRESS_ID,
- params: {
- tab,
- focusRegion: 'viewer',
- i18nKey: 'ui.panels.imageViewer',
- },
- position: {
- direction: 'within',
- referencePanel: launchpad.id,
- },
- });
-
- launchpad.api.setActive();
- });
-};
-
-const MainPanel = memo(() => {
- const { tab } = useAutoLayoutContext();
-
- const onReady = useCallback(
- ({ api }) => {
- initializeMainPanelLayout(tab, api);
- },
- [tab]
- );
- return (
- <>
-
-
-
-
- >
- );
-});
-MainPanel.displayName = 'MainPanel';
-
-const rightPanelComponents: AutoLayoutGridviewComponents = {
- [BOARDS_PANEL_ID]: withPanelContainer(BoardsPanel),
- [GALLERY_PANEL_ID]: withPanelContainer(GalleryPanel),
-};
-
-const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
- navigationApi.registerContainer(tab, 'right', api, () => {
- const gallery = api.addPanel({
- id: GALLERY_PANEL_ID,
- component: GALLERY_PANEL_ID,
- minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,
- minimumHeight: GALLERY_PANEL_MIN_HEIGHT_PX,
- params: {
- tab,
- focusRegion: 'gallery',
- },
- });
-
- const boards = api.addPanel({
- id: BOARDS_PANEL_ID,
- component: BOARDS_PANEL_ID,
- minimumHeight: BOARD_PANEL_MIN_HEIGHT_PX,
- params: {
- tab,
- focusRegion: 'boards',
- },
- position: {
- direction: 'above',
- referencePanel: gallery.id,
- },
- });
-
- gallery.api.setSize({ height: GALLERY_PANEL_DEFAULT_HEIGHT_PX });
- boards.api.setSize({ height: BOARD_PANEL_DEFAULT_HEIGHT_PX });
- });
-};
-
-const RightPanel = memo(() => {
- const { tab } = useAutoLayoutContext();
-
- const onReady = useCallback(
- ({ api }) => {
- initializeRightPanelLayout(tab, api);
- },
- [tab]
- );
- return (
-
- );
-});
-RightPanel.displayName = 'RightPanel';
-
-const leftPanelComponents: AutoLayoutGridviewComponents = {
- [SETTINGS_PANEL_ID]: withPanelContainer(VideoTabLeftPanel),
-};
-
-const initializeLeftPanelLayout = (tab: TabName, api: GridviewApi) => {
- navigationApi.registerContainer(tab, 'left', api, () => {
- api.addPanel({
- id: SETTINGS_PANEL_ID,
- component: SETTINGS_PANEL_ID,
- params: {
- tab,
- focusRegion: 'settings',
- },
- });
- });
-};
-
-const LeftPanel = memo(() => {
- const { tab } = useAutoLayoutContext();
-
- const onReady = useCallback(
- ({ api }) => {
- initializeLeftPanelLayout(tab, api);
- },
- [tab]
- );
- return (
-
- );
-});
-LeftPanel.displayName = 'LeftPanel';
-
-const rootPanelComponents: RootLayoutGridviewComponents = {
- [LEFT_PANEL_ID]: LeftPanel,
- [MAIN_PANEL_ID]: MainPanel,
- [RIGHT_PANEL_ID]: RightPanel,
-};
-
-const initializeRootPanelLayout = (tab: TabName, api: GridviewApi) => {
- navigationApi.registerContainer(tab, 'root', api, () => {
- const main = api.addPanel({
- id: MAIN_PANEL_ID,
- component: MAIN_PANEL_ID,
- priority: LayoutPriority.High,
- });
-
- const left = api.addPanel({
- id: LEFT_PANEL_ID,
- component: LEFT_PANEL_ID,
- minimumWidth: LEFT_PANEL_MIN_SIZE_PX,
- position: {
- direction: 'left',
- referencePanel: main.id,
- },
- });
-
- const right = api.addPanel({
- id: RIGHT_PANEL_ID,
- component: RIGHT_PANEL_ID,
- minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,
- position: {
- direction: 'right',
- referencePanel: main.id,
- },
- });
-
- left.api.setSize({ width: LEFT_PANEL_MIN_SIZE_PX });
- right.api.setSize({ width: RIGHT_PANEL_MIN_SIZE_PX });
- });
-};
-
-export const VideoTabAutoLayout = memo(() => {
- const onReady = useCallback(({ api }) => {
- initializeRootPanelLayout('video', api);
- }, []);
-
- useEffect(
- () => () => {
- navigationApi.unregisterTab('video');
- },
- []
- );
-
- return (
-
-
-
- );
-});
-VideoTabAutoLayout.displayName = 'VideoTabAutoLayout';
diff --git a/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx
index 06ae423e447..026b7897283 100644
--- a/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx
+++ b/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx
@@ -1,7 +1,7 @@
import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview';
import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview';
import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent';
-import { GalleryPanel } from 'features/gallery/components/Gallery';
+import { GalleryPanel } from 'features/gallery/components/GalleryPanel';
import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel';
import NodeEditor from 'features/nodes/components/NodeEditor';
import WorkflowsTabLeftPanel from 'features/nodes/components/sidePanel/WorkflowsTabLeftPanel';
diff --git a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts
index 95f7603821e..b86ba3fa182 100644
--- a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts
+++ b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts
@@ -1,7 +1,7 @@
import { isPlainObject } from 'es-toolkit';
import { z } from 'zod';
-export const zTabName = z.enum(['generate', 'canvas', 'upscaling', 'workflows', 'models', 'queue', 'video']);
+const zTabName = z.enum(['generate', 'canvas', 'upscaling', 'workflows', 'models', 'queue']);
export type TabName = z.infer;
const zPartialDimensions = z.object({
diff --git a/invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx b/invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx
deleted file mode 100644
index 3247ab733e4..00000000000
--- a/invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx
+++ /dev/null
@@ -1,53 +0,0 @@
-import { Flex } from '@invoke-ai/ui-library';
-import { useStore } from '@nanostores/react';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { useVideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu';
-import { useVideoViewerContext } from 'features/video/context/VideoViewerContext';
-import { MediaController } from 'media-chrome/react';
-import { memo, useRef } from 'react';
-import ReactPlayer from 'react-player';
-import type { VideoDTO } from 'services/api/types';
-
-import { VideoPlayerControls } from './VideoPlayerControls';
-
-interface VideoPlayerProps {
- videoDTO: VideoDTO;
-}
-
-export const VideoPlayer = memo(({ videoDTO }: VideoPlayerProps) => {
- const ref = useRef(null);
- useVideoContextMenu(videoDTO, ref);
- const { videoRef } = useVideoViewerContext();
- const authToken = useStore($authToken);
-
- return (
-
-
-
-
-
-
-
- );
-});
-
-VideoPlayer.displayName = 'VideoPlayer';
diff --git a/invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx b/invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx
deleted file mode 100644
index df642880e20..00000000000
--- a/invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx
+++ /dev/null
@@ -1,66 +0,0 @@
-import { Icon, IconButton } from '@invoke-ai/ui-library';
-import { useVideoViewerContext } from 'features/video/context/VideoViewerContext';
-import { useCaptureVideoFrame } from 'features/video/hooks/useCaptureVideoFrame';
-import {
- MediaControlBar,
- MediaFullscreenButton,
- MediaPlayButton,
- MediaTimeDisplay,
- MediaTimeRange,
-} from 'media-chrome/react';
-import type { CSSProperties } from 'react';
-import { useCallback, useState } from 'react';
-import { PiArrowsOutBold, PiCameraBold, PiPauseFill, PiPlayFill, PiSpinnerBold } from 'react-icons/pi';
-
-const NoHoverBackground = {
- '--media-text-color': 'base.200',
- '--media-font-size': '12px',
-} as CSSProperties;
-
-export const VideoPlayerControls = () => {
- const captureVideoFrame = useCaptureVideoFrame();
- const [capturing, setCapturing] = useState(false);
- const { videoRef } = useVideoViewerContext();
-
- const onClickSaveFrame = useCallback(async () => {
- setCapturing(true);
- await captureVideoFrame(videoRef.current);
- setCapturing(false);
- }, [captureVideoFrame, videoRef]);
-
- return (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- }
- size="lg"
- variant="unstyled"
- onClick={onClickSaveFrame}
- aria-label="Save Current Frame"
- isDisabled={capturing}
- _disabled={{
- background: 'rgba(20, 20, 30, 0.7)',
- }}
- height="100%"
- backgroundColor="rgba(20, 20, 30, 0.7)"
- pb={3}
- />
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/video/components/VideoView.tsx b/invokeai/frontend/web/src/features/video/components/VideoView.tsx
deleted file mode 100644
index 4234c5b16b2..00000000000
--- a/invokeai/frontend/web/src/features/video/components/VideoView.tsx
+++ /dev/null
@@ -1,26 +0,0 @@
-import { Flex } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import { useFocusRegion } from 'common/hooks/focus';
-import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
-import { useRef } from 'react';
-import { useVideoDTO } from 'services/api/endpoints/videos';
-
-import { VideoPlayer } from './VideoPlayer';
-
-export const VideoView = () => {
- const ref = useRef(null);
- const lastSelectedItem = useAppSelector(selectLastSelectedItem);
- const videoDTO = useVideoDTO(lastSelectedItem?.id);
-
- useFocusRegion('video', ref);
-
- if (!videoDTO) {
- return null;
- }
-
- return (
-
-
-
- );
-};
diff --git a/invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx b/invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx
deleted file mode 100644
index 96f0b3be452..00000000000
--- a/invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx
+++ /dev/null
@@ -1,24 +0,0 @@
-import type { PropsWithChildren, RefObject } from 'react';
-import { createContext, memo, useContext, useMemo, useRef } from 'react';
-import { assert } from 'tsafe';
-
-type VideoViewerContextValue = {
- videoRef: RefObject;
-};
-
-const VideoViewerContext = createContext(null);
-
-export const VideoViewerContextProvider = memo((props: PropsWithChildren) => {
- const videoRef = useRef(null);
-
- const value = useMemo(() => ({ videoRef }), [videoRef]);
-
- return {props.children};
-});
-VideoViewerContextProvider.displayName = 'VideoViewerContextProvider';
-
-export const useVideoViewerContext = () => {
- const value = useContext(VideoViewerContext);
- assert(value !== null, 'useVideoViewerContext must be used within a VideoViewerContextProvider');
- return value;
-};
diff --git a/invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts b/invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts
deleted file mode 100644
index 481bcc30a2c..00000000000
--- a/invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts
+++ /dev/null
@@ -1,90 +0,0 @@
-import { logger } from 'app/logging/logger';
-import { toast } from 'features/toast/toast';
-import { useCallback } from 'react';
-import { serializeError } from 'serialize-error';
-import { uploadImage } from 'services/api/endpoints/images';
-
-const log = logger('video');
-
-const captureFrame = (video: HTMLVideoElement): File => {
- // Validate video element
- if (video.videoWidth === 0 || video.videoHeight === 0) {
- throw new Error('Invalid video element or video not loaded');
- }
-
- // Check if video is ready for capture
- // https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/readyState
- // 2 == HAVE_CURRENT_DATA
- if (video.readyState < 2) {
- throw new Error('Video is not ready for frame capture');
- }
-
- const canvas = document.createElement('canvas');
- canvas.width = video.videoWidth || 0;
- canvas.height = video.videoHeight || 0;
-
- const context = canvas.getContext('2d');
- if (!context) {
- throw new Error('Failed to get canvas 2D context');
- }
-
- // Draw the current video frame to canvas
- context.drawImage(video, 0, 0);
-
- // Convert to data URL with proper format
- const dataUri = canvas.toDataURL('image/png', 0.92);
- const data = dataUri.split(',')[1];
- const mimeType = dataUri.split(';')[0]?.slice(5);
-
- if (!data || !mimeType) {
- throw new Error('Failed to extract image data from canvas');
- }
-
- // Convert to blob
- const bytes = window.atob(data);
- const buf = new ArrayBuffer(bytes.length);
- const arr = new Uint8Array(buf);
-
- for (let i = 0; i < bytes.length; i++) {
- arr[i] = bytes.charCodeAt(i);
- }
-
- const blob = new Blob([arr], { type: mimeType });
- const file = new File([blob], 'frame.png', { type: mimeType });
- return file;
-};
-
-export const useCaptureVideoFrame = () => {
- /*
- * Capture the current frame of the video uploading it as an asset.
- *
- * Toasts on success or failure. For convenience, accepts null but immediately creates a toast.
- */
- const captureVideoFrame = useCallback(async (video: HTMLVideoElement | null) => {
- try {
- if (!video) {
- toast({
- status: 'error',
- title: 'Video not ready',
- description: 'Please wait for the video to load before capturing a frame.',
- });
- return;
- }
- const file = captureFrame(video);
- await uploadImage({ file, image_category: 'user', is_intermediate: false, silent: true });
- toast({
- status: 'success',
- title: 'Frame saved to assets tab',
- });
- } catch (error) {
- log.error({ error: serializeError(error as Error) }, 'Failed to capture frame');
- toast({
- status: 'error',
- title: 'Failed to capture frame',
- description: 'There was an error capturing the current video frame.',
- });
- }
- }, []);
-
- return captureVideoFrame;
-};
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx
index f7d5ec40ece..72ca9c309b3 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx
@@ -5,7 +5,6 @@ import {
AlertDialogFooter,
AlertDialogHeader,
Button,
- Checkbox,
Flex,
FormControl,
FormLabel,
@@ -14,7 +13,6 @@ import {
import { useStore } from '@nanostores/react';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { deepClone } from 'common/util/deepClone';
-import { $workflowLibraryCategoriesOptions } from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowV3 } from 'features/nodes/types/workflow';
import { isDraftWorkflow, useCreateLibraryWorkflow } from 'features/workflowLibrary/hooks/useCreateNewWorkflow';
import { t } from 'i18next';
@@ -83,14 +81,12 @@ export const SaveWorkflowAsDialog = () => {
};
const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef: RefObject }) => {
- const workflowCategories = useStore($workflowLibraryCategoriesOptions);
const [name, setName] = useState(() => {
if (workflow) {
return getInitialName(workflow);
}
return '';
});
- const [shouldSaveToProject, setShouldSaveToProject] = useState(() => workflowCategories.includes('project'));
const { createNewWorkflow } = useCreateLibraryWorkflow();
@@ -100,13 +96,6 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
setName(e.target.value);
}, []);
- const onChangeCheckbox = useCallback(
- (e: ChangeEvent) => {
- setShouldSaveToProject(e.target.checked);
- },
- [setShouldSaveToProject]
- );
-
const onClose = useCallback(() => {
$workflowToSave.set(null);
}, []);
@@ -114,8 +103,7 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
const onSave = useCallback(async () => {
workflow.id = undefined;
workflow.name = name;
- workflow.meta.category = shouldSaveToProject ? 'project' : 'user';
- workflow.is_published = false;
+ workflow.meta.category = 'user';
// We've just made the workflow a draft, but TS doesn't know that. We need to assert it.
assert(isDraftWorkflow(workflow));
@@ -125,7 +113,7 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
onSuccess: onClose,
onError: onClose,
});
- }, [workflow, name, shouldSaveToProject, createNewWorkflow, onClose]);
+ }, [workflow, name, createNewWorkflow, onClose]);
return (
@@ -138,11 +126,6 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
{t('workflows.workflowName')}
- {workflowCategories.includes('project') && (
-
- {t('workflows.saveWorkflowToProject')}
-
- )}
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx
index 26ce0302960..6f5acc431ed 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx
@@ -1,6 +1,5 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
-import { useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish';
import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -10,12 +9,11 @@ const SaveWorkflowMenuItem = () => {
const { t } = useTranslation();
const saveOrSaveAsWorkflow = useSaveOrSaveAsWorkflow();
const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
- const isPublished = useIsWorkflowPublished();
return (
}
onClick={saveOrSaveAsWorkflow}
>
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts
index b9d65bca897..40c186b44d4 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts
+++ b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts
@@ -1,4 +1,3 @@
-import { useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish';
import { useBuildWorkflowFast } from 'features/nodes/util/workflow/buildWorkflow';
import { saveWorkflowAs } from 'features/workflowLibrary/components/SaveWorkflowAsDialog';
import { isLibraryWorkflow, useSaveLibraryWorkflow } from 'features/workflowLibrary/hooks/useSaveLibraryWorkflow';
@@ -11,18 +10,17 @@ import { useCallback } from 'react';
*/
export const useSaveOrSaveAsWorkflow = () => {
const buildWorkflow = useBuildWorkflowFast();
- const isPublished = useIsWorkflowPublished();
const { saveWorkflow } = useSaveLibraryWorkflow();
const saveOrSaveAsWorkflow = useCallback(() => {
const workflow = buildWorkflow();
- if (isLibraryWorkflow(workflow) && !isPublished) {
+ if (isLibraryWorkflow(workflow)) {
saveWorkflow(workflow);
} else {
saveWorkflowAs(workflow);
}
- }, [buildWorkflow, isPublished, saveWorkflow]);
+ }, [buildWorkflow, saveWorkflow]);
return saveOrSaveAsWorkflow;
};
diff --git a/invokeai/frontend/web/src/index.ts b/invokeai/frontend/web/src/index.ts
deleted file mode 100644
index 01688756989..00000000000
--- a/invokeai/frontend/web/src/index.ts
+++ /dev/null
@@ -1,76 +0,0 @@
-import { enqueueRequestedVideos } from 'features/queue/hooks/useEnqueueVideo';
-
-import { adHocPostProcessingRequested } from './app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
-import { socketConnected } from './app/store/middleware/listenerMiddleware/listeners/socketConnected';
-import {
- controlLayerAdded,
- inpaintMaskAdded,
- rasterLayerAdded,
- rgAdded,
-} from './features/controlLayers/store/canvasSlice';
-import { refImageAdded } from './features/controlLayers/store/refImagesSlice';
-import {
- imageCopiedToClipboard,
- imageDownloaded,
- imageOpenedInNewTab,
- imageUploadedClientSide,
- sentImageToCanvas,
-} from './features/gallery/store/actions';
-import { boardIdSelected } from './features/gallery/store/gallerySlice';
-import { workflowLoaded } from './features/nodes/store/nodesSlice';
-import { enqueueRequestedCanvas } from './features/queue/hooks/useEnqueueCanvas';
-import { enqueueRequestedGenerate } from './features/queue/hooks/useEnqueueGenerate';
-import { enqueueRequestedUpscaling } from './features/queue/hooks/useEnqueueUpscaling';
-import { enqueueRequestedWorkflows } from './features/queue/hooks/useEnqueueWorkflows';
-import { videoModalLinkClicked, videoModalOpened } from './features/system/store/actions';
-import { accordionStateChanged, expanderStateChanged } from './features/ui/store/uiSlice';
-import {
- newWorkflowSaved,
- workflowDownloaded,
- workflowLoadedFromFile,
- workflowUpdated,
-} from './features/workflowLibrary/store/actions';
-export { default as InvokeAIUI } from './app/components/InvokeAIUI';
-export type { StudioInitAction } from './app/hooks/useStudioInitAction';
-export type { LoggingOverrides } from './app/logging/logger';
-export type { NumericalParameterConfig, PartialAppConfig } from './app/types/invokeai';
-export { default as Loading } from './common/components/Loading/Loading';
-export { default as HotkeysModal } from './features/system/components/HotkeysModal/HotkeysModal';
-export { default as InvokeAiLogoComponent } from './features/system/components/InvokeAILogoComponent';
-export { default as SettingsModal } from './features/system/components/SettingsModal/SettingsModal';
-export { default as StatusIndicator } from './features/system/components/StatusIndicator';
-export { boardsApi } from './services/api/endpoints/boards';
-export { imagesApi } from './services/api/endpoints/images';
-export { queueApi } from './services/api/endpoints/queue';
-export { stylePresetsApi } from './services/api/endpoints/stylePresets';
-export { workflowsApi } from './services/api/endpoints/workflows';
-
-export const reduxActions = {
- videoModalLinkClicked,
- videoModalOpened,
- socketConnected,
- workflowDownloaded,
- workflowLoadedFromFile,
- newWorkflowSaved,
- workflowUpdated,
- workflowLoaded,
- sentImageToCanvas,
- imageDownloaded,
- imageCopiedToClipboard,
- imageOpenedInNewTab,
- imageUploadedClientSide,
- accordionStateChanged,
- expanderStateChanged,
- enqueueRequestedGenerate,
- enqueueRequestedCanvas,
- enqueueRequestedWorkflows,
- enqueueRequestedUpscaling,
- enqueueRequestedVideos,
- adHocPostProcessingRequested,
- boardIdSelected,
- rasterLayerAdded,
- controlLayerAdded,
- rgAdded,
- inpaintMaskAdded,
- refImageAdded,
-} as const;
diff --git a/invokeai/frontend/web/src/services/api/README.md b/invokeai/frontend/web/src/services/api/README.md
new file mode 100644
index 00000000000..2cd21dbd99b
--- /dev/null
+++ b/invokeai/frontend/web/src/services/api/README.md
@@ -0,0 +1,20 @@
+# API
+
+The API client is a fairly standard Redux Toolkit Query (RTK-Query) setup.
+
+It defines a simple base query with special handling for OpenAPI schema queries and endpoints: invokeai/frontend/web/src/services/api/index.ts
+
+## Types
+
+The API provides an OpenAPI schema and we generate TS types from it. They are stored in: invokeai/frontend/web/src/services/api/schema.ts
+
+We use https://github.com/openapi-ts/openapi-typescript/ to generate the types.
+
+- Python script to output the OpenAPI schema: scripts/generate_openapi_schema.py
+- Node script to call openapi-typescript and generate the TS types: invokeai/frontend/web/scripts/typegen.js
+
+Pipe the output of the python script to the node script to update the types. There is a `make` target that does this in one fell swoop (after activating venv): `make frontend-typegen`
+
+Alternatively, start the Python server and run `pnpm typegen`.
+
+The schema.ts file is pushed to the repo, and a CI check ensures it is up to date.
diff --git a/invokeai/frontend/web/src/services/api/authToastMiddleware.ts b/invokeai/frontend/web/src/services/api/authToastMiddleware.ts
deleted file mode 100644
index 94dfaca2da2..00000000000
--- a/invokeai/frontend/web/src/services/api/authToastMiddleware.ts
+++ /dev/null
@@ -1,81 +0,0 @@
-import type { Middleware } from '@reduxjs/toolkit';
-import { isRejectedWithValue } from '@reduxjs/toolkit';
-import { $toastMap } from 'app/store/nanostores/toastMap';
-import { toast } from 'features/toast/toast';
-import { t } from 'i18next';
-import { z } from 'zod';
-
-const trialUsageErrorSubstring = 'usage allotment for the free trial';
-const trialUsageErrorCode = 'USAGE_LIMIT_TRIAL';
-
-const orgUsageErrorSubstring = 'organization has reached its predefined usage allotment';
-const orgUsageErrorCode = 'USAGE_LIMIT_ORG';
-
-const indieUsageErrorSubstring = 'usage allotment';
-const indieUsageErrorCode = 'USAGE_LIMIT_INDIE';
-
-//TODO make this dynamic with returned error codes instead of substring check
-const getErrorCode = (errorString?: string) => {
- if (!errorString) {
- return undefined;
- }
- if (errorString.includes(trialUsageErrorSubstring)) {
- return trialUsageErrorCode;
- }
- if (errorString.includes(orgUsageErrorSubstring)) {
- return orgUsageErrorCode;
- }
- if (errorString.includes(indieUsageErrorSubstring)) {
- return indieUsageErrorCode;
- }
-};
-
-const zRejectedForbiddenAction = z.object({
- payload: z.object({
- status: z.literal(403),
- data: z.object({
- detail: z.string(),
- }),
- }),
- meta: z
- .object({
- arg: z
- .object({
- endpointName: z.string().optional(),
- })
- .optional(),
- })
- .optional(),
-});
-
-export const authToastMiddleware: Middleware = () => (next) => (action) => {
- if (isRejectedWithValue(action)) {
- try {
- const parsed = zRejectedForbiddenAction.parse(action);
- const endpointName = parsed.meta?.arg?.endpointName;
- if (endpointName === 'getImageDTO') {
- // do not show toast if problem is image access
- return next(action);
- }
- const toastMap = $toastMap.get();
- const customMessage = parsed.payload.data.detail !== 'Forbidden' ? parsed.payload.data.detail : undefined;
- const errorCode = getErrorCode(customMessage);
- const customToastConfig = errorCode ? toastMap?.[errorCode] : undefined;
-
- if (customToastConfig) {
- toast(customToastConfig);
- } else {
- toast({
- id: `auth-error-toast-${endpointName}`,
- title: t('toast.somethingWentWrong'),
- status: 'error',
- description: customMessage,
- });
- }
- } catch {
- // no-op
- }
- }
-
- return next(action);
-};
diff --git a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts
index d4257742189..f72d6ad81e8 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts
@@ -1,8 +1,7 @@
-import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import type { OpenAPIV3_1 } from 'openapi-types';
import type { stringify } from 'querystring';
import type { paths } from 'services/api/schema';
-import type { AppConfig, AppVersion } from 'services/api/types';
+import type { AppVersion } from 'services/api/types';
import { api, buildV1Url } from '..';
@@ -34,9 +33,12 @@ export const appInfoApi = api.injectEndpoints({
}),
providesTags: ['FetchOnReconnect'],
}),
- getAppConfig: build.query({
+ getPatchmatchStatus: build.query<
+ paths['/api/v1/app/patchmatch_status']['get']['responses']['200']['content']['application/json'],
+ void
+ >({
query: () => ({
- url: buildAppInfoUrl('config'),
+ url: buildAppInfoUrl('patchmatch_status'),
method: 'GET',
}),
providesTags: ['FetchOnReconnect'],
@@ -82,11 +84,7 @@ export const appInfoApi = api.injectEndpoints({
invalidatesTags: ['InvocationCacheStatus'],
}),
getOpenAPISchema: build.query({
- query: () => {
- const openAPISchemaUrl = $openAPISchemaUrl.get();
- const url = openAPISchemaUrl ? openAPISchemaUrl : `${window.location.href.replace(/\/$/, '')}/openapi.json`;
- return url;
- },
+ query: () => `${window.location.href.replace(/\/$/, '')}/openapi.json`,
providesTags: ['Schema'],
}),
}),
@@ -95,7 +93,7 @@ export const appInfoApi = api.injectEndpoints({
export const {
useGetAppVersionQuery,
useGetAppDepsQuery,
- useGetAppConfigQuery,
+ useGetPatchmatchStatusQuery,
useGetRuntimeConfigQuery,
useClearInvocationCacheMutation,
useDisableInvocationCacheMutation,
diff --git a/invokeai/frontend/web/src/services/api/endpoints/boards.ts b/invokeai/frontend/web/src/services/api/endpoints/boards.ts
index 81d8d6db09b..9b7a4f2ad8a 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/boards.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/boards.ts
@@ -3,7 +3,6 @@ import queryString from 'query-string';
import type {
BoardDTO,
CreateBoardArg,
- GetVideoIdsResult,
ImageCategory,
ListBoardsArgs,
OffsetPaginatedResults_ImageDTO_,
@@ -13,7 +12,6 @@ import { getListImagesUrl } from 'services/api/util';
import type { ApiTagDescription } from '..';
import { api, buildV1Url, LIST_TAG } from '..';
-import { buildVideosUrl } from './videos';
/**
* Builds an endpoint URL for the boards router
@@ -97,26 +95,15 @@ export const boardsApi = api.injectEndpoints({
},
}),
- getBoardVideosTotal: build.query<{ total: number }, string | undefined>({
- query: (board_id) => ({
- url: buildVideosUrl('ids', { board_id: board_id ?? 'none' }),
- method: 'GET',
- }),
- providesTags: (result, error, arg) => [{ type: 'BoardVideosTotal', id: arg ?? 'none' }, 'FetchOnReconnect'],
- transformResponse: (response: GetVideoIdsResult) => {
- return { total: response.total_count };
- },
- }),
-
/**
* Boards Mutations
*/
createBoard: build.mutation({
- query: ({ board_name, is_private }) => ({
+ query: ({ board_name }) => ({
url: buildBoardsUrl(),
method: 'POST',
- params: { board_name, is_private },
+ params: { board_name },
}),
invalidatesTags: [{ type: 'Board', id: LIST_TAG }],
}),
@@ -145,7 +132,6 @@ export const {
useListAllBoardsQuery,
useGetBoardImagesTotalQuery,
useGetBoardAssetsTotalQuery,
- useGetBoardVideosTotalQuery,
useCreateBoardMutation,
useUpdateBoardMutation,
useListAllImageNamesForBoardQuery,
diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts
index b5b2827ee73..7b150ac3572 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/images.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts
@@ -1,5 +1,4 @@
import { skipToken } from '@reduxjs/toolkit/query';
-import { $authToken } from 'app/store/nanostores/authToken';
import { getStore } from 'app/store/nanostores/store';
import type { CroppableImageWithDims } from 'features/controlLayers/store/types';
import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types';
@@ -476,7 +475,6 @@ export const {
useGetImageWorkflowQuery,
useLazyGetImageWorkflowQuery,
useUploadImageMutation,
- useCreateImageUploadEntryMutation,
useClearIntermediatesMutation,
useAddImagesToBoardMutation,
useRemoveImagesFromBoardMutation,
@@ -530,25 +528,6 @@ export const getImageDTO = (
return req.unwrap();
};
-/**
- * Imperative RTKQ helper to fetch an image's metadata.
- * @param image_name The name of the image
- * @param options The options for the query. By default, the query will not subscribe to the store.
- * @raises Error if the image metadata is not found or there is an error fetching the image metadata. Images without
- * metadata will return undefined.
- */
-export const getImageMetadata = (
- image_name: string,
- options?: Parameters[1]
-): Promise => {
- const _options = {
- subscribe: false,
- ...options,
- };
- const req = getStore().dispatch(imagesApi.endpoints.getImageMetadata.initiate(image_name, _options));
- return req.unwrap();
-};
-
export const uploadImage = (arg: UploadImageArg): Promise => {
const { dispatch } = getStore();
const req = dispatch(imagesApi.endpoints.uploadImage.initiate(arg, { track: false }));
@@ -579,10 +558,6 @@ export const uploadImages = async (args: UploadImageArg[]): Promise
*/
export const imageDTOToFile = async (imageDTO: ImageDTO): Promise => {
const init: RequestInit = {};
- const authToken = $authToken.get();
- if (authToken) {
- init.headers = { Authorization: `Bearer ${authToken}` };
- }
const res = await fetch(imageDTO.image_url, init);
const blob = await res.blob();
// Create a new file with the same name, which we will upload
diff --git a/invokeai/frontend/web/src/services/api/endpoints/queue.ts b/invokeai/frontend/web/src/services/api/endpoints/queue.ts
index 81027d4f2b0..c246bc30beb 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/queue.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/queue.ts
@@ -1,4 +1,3 @@
-import { $queueId } from 'app/store/nanostores/queueId';
import queryString from 'query-string';
import type { components, paths } from 'services/api/schema';
import type {
@@ -19,7 +18,7 @@ import { api, buildV1Url, LIST_ALL_TAG, LIST_TAG } from '..';
* buildQueueUrl('some-path')
* // '/api/v1/queue/queue_id/some-path'
*/
-const buildQueueUrl = (path: string = '') => buildV1Url(`queue/${$queueId.get()}/${path}`);
+const buildQueueUrl = (path: string = '') => buildV1Url(`queue/default/${path}`);
export type SessionQueueItemStatus = NonNullable;
diff --git a/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts b/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts
index 25d80dc47e2..f04f0a4ded6 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts
@@ -1,4 +1,3 @@
-import { getStore } from 'app/store/nanostores/store';
import type { paths } from 'services/api/schema';
import type { S } from 'services/api/types';
@@ -129,22 +128,4 @@ export const {
useImportStylePresetsMutation,
} = stylePresetsApi;
-/**
- * Imperative RTKQ helper to fetch a style preset.
- * @param style_preset_id The id of the style preset to fetch
- * @param options The options for the query. By default, the query will not subscribe to the store.
- * @raises Error if the style preset is not found or there is an error fetching the style preset
- */
-export const getStylePreset = (
- style_preset_id: string,
- options?: Parameters[1]
-): Promise => {
- const _options = {
- subscribe: false,
- ...options,
- };
- const req = getStore().dispatch(stylePresetsApi.endpoints.getStylePreset.initiate(style_preset_id, _options));
- return req.unwrap();
-};
-
export const selectListStylePresetsRequestState = stylePresetsApi.endpoints.listStylePresets.select();
diff --git a/invokeai/frontend/web/src/services/api/endpoints/videos.ts b/invokeai/frontend/web/src/services/api/endpoints/videos.ts
deleted file mode 100644
index e6f6bd7f6d9..00000000000
--- a/invokeai/frontend/web/src/services/api/endpoints/videos.ts
+++ /dev/null
@@ -1,239 +0,0 @@
-import { skipToken } from '@reduxjs/toolkit/query';
-import { getStore } from 'app/store/nanostores/store';
-import type { paths } from 'services/api/schema';
-import type { GetVideoIdsArgs, GetVideoIdsResult, VideoDTO } from 'services/api/types';
-import {
- getTagsToInvalidateForBoardAffectingMutation,
- getTagsToInvalidateForVideoMutation,
-} from 'services/api/util/tagInvalidation';
-import stableHash from 'stable-hash';
-import type { Param0 } from 'tsafe';
-import type { JsonObject } from 'type-fest';
-
-import { api, buildV1Url, LIST_TAG } from '..';
-
-/**
- * Builds an endpoint URL for the videos router
- * @example
- * buildVideosUrl('some-path')
- * // '/api/v1/videos/some-path'
- */
-export const buildVideosUrl = (path: string = '', query?: Parameters[1]) =>
- buildV1Url(`videos/${path}`, query);
-
-const buildBoardVideosUrl = (path: string = '') => buildV1Url(`board_videos/${path}`);
-
-export const videosApi = api.injectEndpoints({
- endpoints: (build) => ({
- /**
- * Video Queries
- */
-
- getVideoDTO: build.query({
- query: (video_id) => ({ url: buildVideosUrl(`i/${video_id}`) }),
- providesTags: (result, error, video_id) => [{ type: 'Video', id: video_id }],
- }),
-
- getVideoMetadata: build.query({
- query: (video_id) => ({ url: buildVideosUrl(`i/${video_id}/metadata`) }),
- providesTags: (result, error, video_id) => [{ type: 'VideoMetadata', id: video_id }],
- }),
-
- /**
- * Get ordered list of image names for selection operations
- */
- getVideoIds: build.query({
- query: (queryArgs) => ({
- url: buildVideosUrl('ids', queryArgs),
- method: 'GET',
- }),
- providesTags: (result, error, queryArgs) => [
- 'VideoIdList',
- 'FetchOnReconnect',
- { type: 'VideoIdList', id: stableHash(queryArgs) },
- ],
- }),
- /**
- * Get image DTOs for the specified image names. Maintains order of input names.
- */
- getVideoDTOsByNames: build.mutation<
- paths['/api/v1/videos/videos_by_ids']['post']['responses']['200']['content']['application/json'],
- paths['/api/v1/videos/videos_by_ids']['post']['requestBody']['content']['application/json']
- >({
- query: (body) => ({
- url: buildVideosUrl('videos_by_ids'),
- method: 'POST',
- body,
- }),
- // Don't provide cache tags - we'll manually upsert into individual getImageDTO caches
- async onQueryStarted(_, { dispatch, queryFulfilled }) {
- try {
- const { data: videoDTOs } = await queryFulfilled;
-
- // Upsert each DTO into the individual image cache
- const updates: Param0 = [];
- for (const videoDTO of videoDTOs) {
- updates.push({
- endpointName: 'getVideoDTO',
- arg: videoDTO.video_id,
- value: videoDTO,
- });
- }
- dispatch(videosApi.util.upsertQueryEntries(updates));
- } catch {
- // Handle error if needed
- }
- },
- }),
- /**
- * Star a list of videos.
- */
- starVideos: build.mutation<
- paths['/api/v1/videos/star']['post']['responses']['200']['content']['application/json'],
- paths['/api/v1/videos/star']['post']['requestBody']['content']['application/json']
- >({
- query: (body) => ({
- url: buildVideosUrl('star'),
- method: 'POST',
- body,
- }),
- invalidatesTags: (result) => {
- if (!result) {
- return [];
- }
- return [
- ...getTagsToInvalidateForVideoMutation(result.starred_videos),
- ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards),
- 'VideoCollectionCounts',
- { type: 'VideoCollection', id: 'starred' },
- { type: 'VideoCollection', id: 'unstarred' },
- ];
- },
- }),
- /**
- * Unstar a list of videos.
- */
- unstarVideos: build.mutation<
- paths['/api/v1/videos/unstar']['post']['responses']['200']['content']['application/json'],
- paths['/api/v1/videos/unstar']['post']['requestBody']['content']['application/json']
- >({
- query: (body) => ({
- url: buildVideosUrl('unstar'),
- method: 'POST',
- body,
- }),
- invalidatesTags: (result) => {
- if (!result) {
- return [];
- }
- return [
- ...getTagsToInvalidateForVideoMutation(result.unstarred_videos),
- ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards),
- 'VideoCollectionCounts',
- { type: 'VideoCollection', id: 'starred' },
- { type: 'VideoCollection', id: 'unstarred' },
- ];
- },
- }),
- deleteVideos: build.mutation<
- paths['/api/v1/videos/delete']['post']['responses']['200']['content']['application/json'],
- paths['/api/v1/videos/delete']['post']['requestBody']['content']['application/json']
- >({
- query: (body) => ({
- url: buildVideosUrl('delete'),
- method: 'POST',
- body,
- }),
- invalidatesTags: (result) => {
- if (!result) {
- return [];
- }
- // We ignore the deleted images when getting tags to invalidate. If we did not, we will invalidate the queries
- // that fetch image DTOs, metadata, and workflows. But we have just deleted those images! Invalidating the tags
- // will force those queries to re-fetch, and the requests will of course 404.
- return [
- ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards),
- 'VideoCollectionCounts',
- { type: 'VideoCollection', id: LIST_TAG },
- ];
- },
- }),
- addVideosToBoard: build.mutation<
- paths['/api/v1/board_videos/batch']['post']['responses']['201']['content']['application/json'],
- paths['/api/v1/board_videos/batch']['post']['requestBody']['content']['application/json']
- >({
- query: (body) => ({
- url: buildBoardVideosUrl('batch'),
- method: 'POST',
- body,
- }),
- invalidatesTags: (result) => {
- if (!result) {
- return [];
- }
- return [
- ...getTagsToInvalidateForVideoMutation(result.added_videos),
- ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards),
- ];
- },
- }),
- removeVideosFromBoard: build.mutation<
- paths['/api/v1/board_videos/batch/delete']['post']['responses']['201']['content']['application/json'],
- paths['/api/v1/board_videos/batch/delete']['post']['requestBody']['content']['application/json']
- >({
- query: (body) => ({
- url: buildBoardVideosUrl('batch/delete'),
- method: 'POST',
- body,
- }),
- invalidatesTags: (result) => {
- if (!result) {
- return [];
- }
- return [
- ...getTagsToInvalidateForVideoMutation(result.removed_videos),
- ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards),
- ];
- },
- }),
- }),
-});
-
-export const {
- useGetVideoDTOQuery,
- useGetVideoIdsQuery,
- useGetVideoDTOsByNamesMutation,
- useStarVideosMutation,
- useUnstarVideosMutation,
- useDeleteVideosMutation,
- useAddVideosToBoardMutation,
- useRemoveVideosFromBoardMutation,
- useGetVideoMetadataQuery,
-} = videosApi;
-
-/**
- * Imperative RTKQ helper to fetch an VideoDTO.
- * @param id The id of the video to fetch
- * @param options The options for the query. By default, the query will not subscribe to the store.
- * @returns The ImageDTO if found, otherwise null
- */
-export const getVideoDTOSafe = async (
- id: string,
- options?: Parameters[1]
-): Promise => {
- const _options = {
- subscribe: false,
- ...options,
- };
- const req = getStore().dispatch(videosApi.endpoints.getVideoDTOsByNames.initiate({ video_ids: [id] }, _options));
- try {
- return (await req.unwrap())[0] ?? null;
- } catch {
- return null;
- }
-};
-
-export const useVideoDTO = (video_id: string | null | undefined) => {
- const { currentData: videoDTO } = useGetVideoDTOQuery(video_id ?? skipToken);
- return videoDTO ?? null;
-};
diff --git a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts
index 70cbc76044f..b9a02204fc4 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts
@@ -148,13 +148,6 @@ export const workflowsApi = api.injectEndpoints({
}),
invalidatesTags: (result, error, workflow_id) => [{ type: 'Workflow', id: workflow_id }],
}),
- unpublishWorkflow: build.mutation({
- query: (workflow_id) => ({
- url: buildWorkflowsUrl(`i/${workflow_id}/unpublish`),
- method: 'POST',
- }),
- invalidatesTags: (result, error, workflow_id) => [{ type: 'Workflow', id: workflow_id }],
- }),
}),
});
@@ -170,5 +163,4 @@ export const {
useListWorkflowsInfiniteInfiniteQuery,
useSetWorkflowThumbnailMutation,
useDeleteWorkflowThumbnailMutation,
- useUnpublishWorkflowMutation,
} = workflowsApi;
diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts
index 20d2a1e0c7c..aaeb84a4cc2 100644
--- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts
+++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts
@@ -9,15 +9,12 @@ import {
} from 'services/api/endpoints/models';
import type { AnyModelConfig } from 'services/api/types';
import {
- isChatGPT4oModelConfig,
isCLIPEmbedModelConfigOrSubmodel,
isControlLayerModelConfig,
isControlNetModelConfig,
- isFluxKontextApiModelConfig,
isFluxKontextModelConfig,
isFluxReduxModelConfig,
isFluxVAEModelConfig,
- isGemini2_5ModelConfig,
isIPAdapterModelConfig,
isLoRAModelConfig,
isNonRefinerMainModelConfig,
@@ -26,7 +23,6 @@ import {
isT5EncoderModelConfigOrSubmodel,
isTIModelConfig,
isVAEModelConfigOrSubmodel,
- isVideoModelConfig,
} from 'services/api/types';
const buildModelsHook =
@@ -58,18 +54,11 @@ export const useEmbeddingModels = buildModelsHook(isTIModelConfig);
export const useVAEModels = () => buildModelsHook(isVAEModelConfigOrSubmodel)();
export const useFluxVAEModels = () => buildModelsHook(isFluxVAEModelConfig)();
export const useGlobalReferenceImageModels = buildModelsHook(
- (config) =>
- isIPAdapterModelConfig(config) ||
- isFluxReduxModelConfig(config) ||
- isChatGPT4oModelConfig(config) ||
- isFluxKontextApiModelConfig(config) ||
- isFluxKontextModelConfig(config) ||
- isGemini2_5ModelConfig(config)
+ (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) || isFluxKontextModelConfig(config)
);
export const useRegionalReferenceImageModels = buildModelsHook(
(config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config)
);
-export const useVideoModels = buildModelsHook(isVideoModelConfig);
const buildModelsSelector =
(typeGuard: (config: AnyModelConfig) => config is T): Selector =>
@@ -82,13 +71,7 @@ const buildModelsSelector =
};
export const selectIPAdapterModels = buildModelsSelector(isIPAdapterModelConfig);
export const selectGlobalRefImageModels = buildModelsSelector(
- (config) =>
- isIPAdapterModelConfig(config) ||
- isFluxReduxModelConfig(config) ||
- isChatGPT4oModelConfig(config) ||
- isFluxKontextApiModelConfig(config) ||
- isFluxKontextModelConfig(config) ||
- isGemini2_5ModelConfig(config)
+ (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) || isFluxKontextModelConfig(config)
);
export const selectRegionalRefImageModels = buildModelsSelector(
(config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config)
diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts
index df178225ac0..3769e683618 100644
--- a/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts
+++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts
@@ -1,14 +1,10 @@
import { skipToken } from '@reduxjs/toolkit/query';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectWorkflowFetchDebounce } from 'features/system/store/configSlice';
import { useGetImageWorkflowQuery } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { useDebounce } from 'use-debounce';
export const useDebouncedImageWorkflow = (imageDTO?: ImageDTO | null) => {
- const workflowFetchDebounce = useAppSelector(selectWorkflowFetchDebounce);
-
- const [debouncedImageName] = useDebounce(imageDTO?.has_workflow ? imageDTO.image_name : null, workflowFetchDebounce);
+ const [debouncedImageName] = useDebounce(imageDTO?.has_workflow ? imageDTO.image_name : null, 300);
const result = useGetImageWorkflowQuery(debouncedImageName ?? skipToken);
diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts
index 4a96867bf5a..7cd41043dbd 100644
--- a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts
+++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts
@@ -1,14 +1,9 @@
import { skipToken } from '@reduxjs/toolkit/query';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectMetadataFetchDebounce } from 'features/system/store/configSlice';
import { imagesApi, useGetImageMetadataQuery } from 'services/api/endpoints/images';
-import { useGetVideoMetadataQuery, videosApi } from 'services/api/endpoints/videos';
import { useDebounce } from 'use-debounce';
export const useDebouncedMetadata = (imageName?: string | null) => {
- const metadataFetchDebounce = useAppSelector(selectMetadataFetchDebounce);
-
- const [debouncedImageName] = useDebounce(imageName, metadataFetchDebounce);
+ const [debouncedImageName] = useDebounce(imageName, 300);
const { currentData: cachedData } = imagesApi.endpoints.getImageMetadata.useQueryState(imageName ?? skipToken);
const { currentData: data, isFetching } = useGetImageMetadataQuery(debouncedImageName ?? skipToken);
@@ -17,16 +12,3 @@ export const useDebouncedMetadata = (imageName?: string | null) => {
isLoading: cachedData ? false : isFetching || imageName !== debouncedImageName,
};
};
-
-export const useDebouncedVideoMetadata = (videoId?: string | null) => {
- const metadataFetchDebounce = useAppSelector(selectMetadataFetchDebounce);
-
- const [debouncedVideoId] = useDebounce(videoId, metadataFetchDebounce);
- const { currentData: cachedData } = videosApi.endpoints.getVideoMetadata.useQueryState(videoId ?? skipToken);
- const { currentData: data, isFetching } = useGetVideoMetadataQuery(debouncedVideoId ?? skipToken);
-
- return {
- metadata: cachedData ?? data,
- isLoading: cachedData ? false : isFetching || videoId !== debouncedVideoId,
- };
-};
diff --git a/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts b/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts
index 5e1f15e7900..adf197b74a9 100644
--- a/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts
+++ b/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts
@@ -1,9 +1,7 @@
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector } from 'app/store/storeHooks';
import { selectModelKey } from 'features/controlLayers/store/paramsSlice';
-import { selectVideoModelKey } from 'features/parameters/store/videoSlice';
import { useGetModelConfigQuery } from 'services/api/endpoints/models';
-import type { VideoApiModelConfig } from 'services/api/types';
export const useSelectedModelConfig = () => {
const key = useAppSelector(selectModelKey);
@@ -11,10 +9,3 @@ export const useSelectedModelConfig = () => {
return modelConfig;
};
-
-export const useSelectedVideoModelConfig = () => {
- const key = useAppSelector(selectVideoModelKey);
- const { data: modelConfig } = useGetModelConfigQuery(key ?? skipToken);
-
- return modelConfig as VideoApiModelConfig | undefined;
-};
diff --git a/invokeai/frontend/web/src/services/api/index.ts b/invokeai/frontend/web/src/services/api/index.ts
index c8876bbffa4..d5b1e4672a8 100644
--- a/invokeai/frontend/web/src/services/api/index.ts
+++ b/invokeai/frontend/web/src/services/api/index.ts
@@ -7,9 +7,6 @@ import type {
TagDescription,
} from '@reduxjs/toolkit/query/react';
import { buildCreateApi, coreModule, fetchBaseQuery, reactHooksModule } from '@reduxjs/toolkit/query/react';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { $baseUrl } from 'app/store/nanostores/baseUrl';
-import { $projectId } from 'app/store/nanostores/projectId';
import queryString from 'query-string';
import stableHash from 'stable-hash';
@@ -19,7 +16,6 @@ const tagTypes = [
'Board',
'BoardImagesTotal',
'BoardAssetsTotal',
- 'BoardVideosTotal',
'HFTokenStatus',
'Image',
'ImageNameList',
@@ -56,12 +52,6 @@ const tagTypes = [
'StylePreset',
'Schema',
'QueueCountsByDestination',
- 'Video',
- 'VideoMetadata',
- 'VideoList',
- 'VideoIdList',
- 'VideoCollectionCounts',
- 'VideoCollection',
// This is invalidated on reconnect. It should be used for queries that have changing data,
// especially related to the queue and generation.
'FetchOnReconnect',
@@ -72,13 +62,10 @@ export const LIST_TAG = 'LIST';
export const LIST_ALL_TAG = 'LIST_ALL';
export const getBaseUrl = (): string => {
- const baseUrl = $baseUrl.get();
- return baseUrl || window.location.href.replace(/\/$/, '');
+ return window.location.href.replace(/\/$/, '');
};
const dynamicBaseQuery: BaseQueryFn = (args, api, extraOptions) => {
- const authToken = $authToken.get();
- const projectId = $projectId.get();
const isOpenAPIRequest =
(args instanceof Object && args.url.includes('openapi.json')) ||
(typeof args === 'string' && args.includes('openapi.json'));
@@ -92,20 +79,6 @@ const dynamicBaseQuery: BaseQueryFn {
- if (authToken) {
- headers.set('Authorization', `Bearer ${authToken}`);
- }
- if (projectId) {
- headers.set('project-id', projectId);
- }
-
- return headers;
- };
- }
-
const rawBaseQuery = fetchBaseQuery(fetchBaseQueryArgs);
return rawBaseQuery(args, api, extraOptions);
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index 3be604b7d75..b52f6eb74c6 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -792,161 +792,6 @@ export type paths = {
patch?: never;
trace?: never;
};
- "/api/v1/videos/i/{video_id}": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- /**
- * Get Video Dto
- * @description Gets a video's DTO
- */
- get: operations["get_video_dto"];
- put?: never;
- post?: never;
- delete?: never;
- options?: never;
- head?: never;
- /**
- * Update Video
- * @description Updates a video
- */
- patch: operations["update_video"];
- trace?: never;
- };
- "/api/v1/videos/delete": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- /** Delete Videos From List */
- post: operations["delete_videos_from_list"];
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/videos/star": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- /** Star Videos In List */
- post: operations["star_videos_in_list"];
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/videos/unstar": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- /** Unstar Videos In List */
- post: operations["unstar_videos_in_list"];
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/videos/uncategorized": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- post?: never;
- /**
- * Delete Uncategorized Videos
- * @description Deletes all videos that are uncategorized
- */
- delete: operations["delete_uncategorized_videos"];
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/videos/": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- /**
- * List Video Dtos
- * @description Lists video DTOs
- */
- get: operations["list_video_dtos"];
- put?: never;
- post?: never;
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/videos/ids": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- /**
- * Get Video Ids
- * @description Gets ordered list of video ids with metadata for optimistic updates
- */
- get: operations["get_video_ids"];
- put?: never;
- post?: never;
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/videos/videos_by_ids": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- /**
- * Get Videos By Ids
- * @description Gets video DTOs for the specified video ids. Maintains order of input ids.
- */
- post: operations["get_videos_by_ids"];
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
"/api/v1/boards/": {
parameters: {
query?: never;
@@ -1083,46 +928,6 @@ export type paths = {
patch?: never;
trace?: never;
};
- "/api/v1/board_videos/batch": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- /**
- * Add Videos To Board
- * @description Adds a list of videos to a board
- */
- post: operations["add_videos_to_board"];
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
- "/api/v1/board_videos/batch/delete": {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- get?: never;
- put?: never;
- /**
- * Remove Videos From Board
- * @description Removes a list of videos from their board, if they had one
- */
- post: operations["remove_videos_from_board"];
- delete?: never;
- options?: never;
- head?: never;
- patch?: never;
- trace?: never;
- };
"/api/v1/model_relationships/i/{model_key}": {
parameters: {
query?: never;
@@ -1221,15 +1026,15 @@ export type paths = {
patch?: never;
trace?: never;
};
- "/api/v1/app/config": {
+ "/api/v1/app/patchmatch_status": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
- /** Get Config */
- get: operations["get_config"];
+ /** Get Patchmatch Status */
+ get: operations["get_patchmatch_status"];
put?: never;
post?: never;
delete?: never;
@@ -2147,19 +1952,6 @@ export type components = {
*/
type: "add";
};
- /** AddVideosToBoardResult */
- AddVideosToBoardResult: {
- /**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
- */
- affected_boards: string[];
- /**
- * Added Videos
- * @description The video ids that were added to the board
- */
- added_videos: string[];
- };
/**
* Alpha Mask to Tensor
* @description Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0.
@@ -2200,33 +1992,7 @@ export type components = {
*/
type: "alpha_mask_to_tensor";
};
- AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
- /**
- * AppConfig
- * @description App Config Response
- */
- AppConfig: {
- /**
- * Infill Methods
- * @description List of available infill methods
- */
- infill_methods: string[];
- /**
- * Upscaling Methods
- * @description List of upscaling methods
- */
- upscaling_methods: components["schemas"]["Upscaler"][];
- /**
- * Nsfw Methods
- * @description List of NSFW checking methods
- */
- nsfw_methods: string[];
- /**
- * Watermarking Methods
- * @description List of invisible watermark methods
- */
- watermarking_methods: string[];
- };
+ AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
/**
* AppVersion
* @description App Version Response
@@ -2237,11 +2003,6 @@ export type components = {
* @description App version
*/
version: string;
- /**
- * Highlights
- * @description Highlights of release
- */
- highlights?: string[] | null;
};
/**
* Apply Tensor Mask to Image
@@ -2383,7 +2144,7 @@ export type components = {
* fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional.
* @enum {string}
*/
- BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "gemini-2.5" | "chatgpt-4o" | "flux-kontext" | "veo3" | "runway" | "unknown";
+ BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "unknown";
/** Batch */
Batch: {
/**
@@ -2707,11 +2468,6 @@ export type components = {
* @description Whether or not the board is archived.
*/
archived: boolean;
- /**
- * Is Private
- * @description Whether the board is private.
- */
- is_private?: boolean | null;
/**
* Image Count
* @description The number of images in the board.
@@ -2722,11 +2478,6 @@ export type components = {
* @description The number of assets in the board.
*/
asset_count: number;
- /**
- * Video Count
- * @description The number of videos in the board.
- */
- video_count: number;
};
/**
* BoardField
@@ -2771,19 +2522,6 @@ export type components = {
*/
image_names: string[];
};
- /** Body_add_videos_to_board */
- Body_add_videos_to_board: {
- /**
- * Board Id
- * @description The id of the board to add to
- */
- board_id: string;
- /**
- * Video Ids
- * @description The ids of the videos to add
- */
- video_ids: string[];
- };
/** Body_cancel_by_batch_ids */
Body_cancel_by_batch_ids: {
/**
@@ -2836,14 +2574,6 @@ export type components = {
*/
image_names: string[];
};
- /** Body_delete_videos_from_list */
- Body_delete_videos_from_list: {
- /**
- * Video Ids
- * @description The list of ids of videos to delete
- */
- video_ids: string[];
- };
/** Body_do_hf_login */
Body_do_hf_login: {
/**
@@ -2900,8 +2630,6 @@ export type components = {
* @default false
*/
prepend?: boolean;
- /** @description The validation run data to use for this batch. This is only used if this is a validation run. */
- validation_run_data?: components["schemas"]["ValidationRunData"] | null;
};
/** Body_get_images_by_names */
Body_get_images_by_names: {
@@ -2919,14 +2647,6 @@ export type components = {
*/
item_ids: number[];
};
- /** Body_get_videos_by_ids */
- Body_get_videos_by_ids: {
- /**
- * Video Ids
- * @description Object containing list of video ids to fetch DTOs for
- */
- video_ids: string[];
- };
/** Body_import_style_presets */
Body_import_style_presets: {
/**
@@ -2977,14 +2697,6 @@ export type components = {
*/
image_names: string[];
};
- /** Body_remove_videos_from_board */
- Body_remove_videos_from_board: {
- /**
- * Video Ids
- * @description The ids of the videos to remove
- */
- video_ids: string[];
- };
/** Body_set_workflow_thumbnail */
Body_set_workflow_thumbnail: {
/**
@@ -3002,14 +2714,6 @@ export type components = {
*/
image_names: string[];
};
- /** Body_star_videos_in_list */
- Body_star_videos_in_list: {
- /**
- * Video Ids
- * @description The list of ids of videos to star
- */
- video_ids: string[];
- };
/** Body_unstar_images_in_list */
Body_unstar_images_in_list: {
/**
@@ -3018,14 +2722,6 @@ export type components = {
*/
image_names: string[];
};
- /** Body_unstar_videos_in_list */
- Body_unstar_videos_in_list: {
- /**
- * Video Ids
- * @description The list of ids of videos to unstar
- */
- video_ids: string[];
- };
/** Body_update_model_image */
Body_update_model_image: {
/**
@@ -3422,11 +3118,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -3503,11 +3194,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -3680,11 +3366,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -5186,11 +4867,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null;
/**
* Base
@@ -5377,11 +5053,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Config Path
* @description Path to the config for this model, if any.
@@ -5456,11 +5127,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Config Path
* @description Path to the config for this model, if any.
@@ -5535,11 +5201,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Config Path
* @description Path to the config for this model, if any.
@@ -5614,11 +5275,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Config Path
* @description Path to the config for this model, if any.
@@ -5693,11 +5349,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -5769,11 +5420,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -5845,11 +5491,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -5921,11 +5562,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -6601,19 +6237,6 @@ export type components = {
*/
deleted_images: string[];
};
- /** DeleteVideosResult */
- DeleteVideosResult: {
- /**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
- */
- affected_boards: string[];
- /**
- * Deleted Videos
- * @description The ids of the videos that were deleted
- */
- deleted_videos: string[];
- };
/**
* Denoise - SD1.5, SDXL
* @description Denoises noisy latents to decodable images
@@ -7517,11 +7140,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default flux_redux
@@ -7809,30 +7427,6 @@ export type components = {
*/
y: number;
};
- /** FieldIdentifier */
- FieldIdentifier: {
- /**
- * Kind
- * @description The kind of field
- * @enum {string}
- */
- kind: "input" | "output";
- /**
- * Node Id
- * @description The ID of the node
- */
- node_id: string;
- /**
- * Field Name
- * @description The name of the field
- */
- field_name: string;
- /**
- * User Label
- * @description The user label of the field, if any
- */
- user_label: string | null;
- };
/**
* FieldKind
* @description The kind of field.
@@ -9644,7 +9238,7 @@ export type components = {
* @description The results of node executions
*/
results: {
- [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | 
components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"];
+ [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | 
components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"];
};
/**
* Errors
@@ -10127,11 +9721,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -10200,11 +9789,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -10273,11 +9857,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -10346,11 +9925,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -10419,11 +9993,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -10494,11 +10063,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -10569,11 +10133,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default ip_adapter
@@ -12807,7 +12366,7 @@ export type components = {
* Result
* @description The result of the invocation
*/
- result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | 
components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"];
+ result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | 
components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"];
};
/**
* InvocationErrorEvent
@@ -12876,18 +12435,6 @@ export type components = {
* @description The error traceback
*/
error_traceback: string;
- /**
- * User Id
- * @description The ID of the user who created the invocation
- * @default null
- */
- user_id: string | null;
- /**
- * Project Id
- * @description The ID of the user who created the invocation
- * @default null
- */
- project_id: string | null;
};
InvocationOutputMap: {
add: components["schemas"]["IntegerOutput"];
@@ -14718,11 +14265,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Format
* @default diffusers
@@ -14998,11 +14540,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15078,11 +14615,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15158,11 +14690,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15238,11 +14765,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15318,11 +14840,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15398,11 +14915,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15478,11 +14990,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15558,11 +15065,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15638,11 +15140,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -15718,11 +15215,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default lora
@@ -16012,11 +15504,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16101,11 +15588,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16187,11 +15669,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16274,11 +15751,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16361,11 +15833,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16448,11 +15915,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16535,11 +15997,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16617,11 +16074,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16701,11 +16153,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16785,11 +16232,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16874,11 +16316,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16958,11 +16395,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -16993,8 +16425,11 @@ export type components = {
*/
base: "sdxl";
};
- /** Main_ExternalAPI_ChatGPT4o_Config */
- Main_ExternalAPI_ChatGPT4o_Config: {
+ /**
+ * Main_GGUF_FLUX_Config
+ * @description Model config for main checkpoint models.
+ */
+ Main_GGUF_FLUX_Config: {
/**
* Key
* @description A unique key for this model.
@@ -17042,11 +16477,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default main
@@ -17061,442 +16491,481 @@ export type components = {
/** @description Default settings for this model */
default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
/**
- * Format
- * @default api
- * @constant
+ * Config Path
+ * @description Path to the config for this model, if any.
*/
- format: "api";
+ config_path: string | null;
/**
* Base
- * @default chatgpt-4o
+ * @default flux
+ * @constant
+ */
+ base: "flux";
+ /**
+ * Format
+ * @default gguf_quantized
* @constant
*/
- base: "chatgpt-4o";
+ format: "gguf_quantized";
+ variant: components["schemas"]["FluxVariantType"];
};
- /** Main_ExternalAPI_FluxKontext_Config */
- Main_ExternalAPI_FluxKontext_Config: {
+ /**
+ * Combine Masks
+ * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.
+ */
+ MaskCombineInvocation: {
/**
- * Key
- * @description A unique key for this model.
+ * @description The board to save the image to
+ * @default null
*/
- key: string;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Hash
- * @description The hash of the model file(s).
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- hash: string;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- path: string;
+ id: string;
/**
- * File Size
- * @description The size of the model in bytes.
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- file_size: number;
+ is_intermediate?: boolean;
/**
- * Name
- * @description Name of the model.
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- name: string;
+ use_cache?: boolean;
/**
- * Description
- * @description Model description
+ * @description The first mask to combine
+ * @default null
*/
- description: string | null;
+ mask1?: components["schemas"]["ImageField"] | null;
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * @description The second image to combine
+ * @default null
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ mask2?: components["schemas"]["ImageField"] | null;
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * type
+ * @default mask_combine
+ * @constant
*/
- source_api_response: string | null;
+ type: "mask_combine";
+ };
+ /**
+ * Mask Edge
+ * @description Applies an edge mask to an image
+ */
+ MaskEdgeInvocation: {
/**
- * Cover Image
- * @description Url for image to preview model
+ * @description The board to save the image to
+ * @default null
*/
- cover_image: string | null;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Usage Info
- * @description Usage information for this model
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- usage_info: string | null;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Type
- * @default main
- * @constant
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- type: "main";
+ id: string;
/**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ is_intermediate?: boolean;
/**
- * Format
- * @default api
- * @constant
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- format: "api";
+ use_cache?: boolean;
/**
- * Base
- * @default flux-kontext
- * @constant
+ * @description The image to apply the mask to
+ * @default null
*/
- base: "flux-kontext";
- };
- /** Main_ExternalAPI_Gemini2_5_Config */
- Main_ExternalAPI_Gemini2_5_Config: {
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Key
- * @description A unique key for this model.
+ * Edge Size
+ * @description The size of the edge
+ * @default null
*/
- key: string;
+ edge_size?: number | null;
/**
- * Hash
- * @description The hash of the model file(s).
+ * Edge Blur
+ * @description The amount of blur on the edge
+ * @default null
*/
- hash: string;
+ edge_blur?: number | null;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Low Threshold
+ * @description First threshold for the hysteresis procedure in Canny edge detection
+ * @default null
*/
- path: string;
+ low_threshold?: number | null;
/**
- * File Size
- * @description The size of the model in bytes.
+ * High Threshold
+ * @description Second threshold for the hysteresis procedure in Canny edge detection
+ * @default null
*/
- file_size: number;
+ high_threshold?: number | null;
/**
- * Name
- * @description Name of the model.
+ * type
+ * @default mask_edge
+ * @constant
*/
- name: string;
+ type: "mask_edge";
+ };
+ /**
+ * Mask from Alpha
+ * @description Extracts the alpha channel of an image as a mask.
+ */
+ MaskFromAlphaInvocation: {
/**
- * Description
- * @description Model description
+ * @description The board to save the image to
+ * @default null
*/
- description: string | null;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- source_api_response: string | null;
+ id: string;
/**
- * Cover Image
- * @description Url for image to preview model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- cover_image: string | null;
+ is_intermediate?: boolean;
/**
- * Usage Info
- * @description Usage information for this model
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- usage_info: string | null;
+ use_cache?: boolean;
/**
- * Type
- * @default main
- * @constant
- */
- type: "main";
- /**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
+ * @description The image to create the mask from
+ * @default null
*/
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Format
- * @default api
- * @constant
+ * Invert
+ * @description Whether or not to invert the mask
+ * @default false
*/
- format: "api";
+ invert?: boolean;
/**
- * Base
- * @default gemini-2.5
+ * type
+ * @default tomask
* @constant
*/
- base: "gemini-2.5";
+ type: "tomask";
};
- /** Main_ExternalAPI_Imagen3_Config */
- Main_ExternalAPI_Imagen3_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
+ /**
+ * Mask from Segmented Image
+ * @description Generate a mask for a particular color in an ID Map
+ */
+ MaskFromIDInvocation: {
/**
- * Hash
- * @description The hash of the model file(s).
+ * @description The board to save the image to
+ * @default null
*/
- hash: string;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- path: string;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * File Size
- * @description The size of the model in bytes.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- file_size: number;
+ id: string;
/**
- * Name
- * @description Name of the model.
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- name: string;
+ is_intermediate?: boolean;
/**
- * Description
- * @description Model description
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- description: string | null;
+ use_cache?: boolean;
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * @description The image to create the mask from
+ * @default null
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * @description ID color to mask
+ * @default null
*/
- source_api_response: string | null;
+ color?: components["schemas"]["ColorField"] | null;
/**
- * Cover Image
- * @description Url for image to preview model
+ * Threshold
+ * @description Threshold for color detection
+ * @default 100
*/
- cover_image: string | null;
+ threshold?: number;
/**
- * Usage Info
- * @description Usage information for this model
+ * Invert
+ * @description Whether or not to invert the mask
+ * @default false
*/
- usage_info: string | null;
+ invert?: boolean;
/**
- * Type
- * @default main
+ * type
+ * @default mask_from_id
* @constant
*/
- type: "main";
+ type: "mask_from_id";
+ };
+ /**
+ * MaskOutput
+ * @description A torch mask tensor.
+ */
+ MaskOutput: {
+ /** @description The mask. */
+ mask: components["schemas"]["TensorField"];
/**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
+ * Width
+ * @description The width of the mask in pixels.
*/
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ width: number;
/**
- * Format
- * @default api
- * @constant
+ * Height
+ * @description The height of the mask in pixels.
*/
- format: "api";
+ height: number;
/**
- * Base
- * @default imagen3
+ * type
+ * @default mask_output
* @constant
*/
- base: "imagen3";
+ type: "mask_output";
};
- /** Main_ExternalAPI_Imagen4_Config */
- Main_ExternalAPI_Imagen4_Config: {
+ /**
+ * Tensor Mask to Image
+ * @description Convert a mask tensor to an image.
+ */
+ MaskTensorToImageInvocation: {
/**
- * Key
- * @description A unique key for this model.
+ * @description The board to save the image to
+ * @default null
*/
- key: string;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Hash
- * @description The hash of the model file(s).
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- hash: string;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- path: string;
+ id: string;
/**
- * File Size
- * @description The size of the model in bytes.
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- file_size: number;
+ is_intermediate?: boolean;
/**
- * Name
- * @description Name of the model.
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- name: string;
+ use_cache?: boolean;
/**
- * Description
- * @description Model description
+ * @description The mask tensor to convert.
+ * @default null
*/
- description: string | null;
+ mask?: components["schemas"]["TensorField"] | null;
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * type
+ * @default tensor_mask_to_image
+ * @constant
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ type: "tensor_mask_to_image";
+ };
+ /**
+ * MediaPipe Face Detection
+ * @description Detects faces using MediaPipe.
+ */
+ MediaPipeFaceDetectionInvocation: {
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * @description The board to save the image to
+ * @default null
*/
- source_api_response: string | null;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Cover Image
- * @description Url for image to preview model
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- cover_image: string | null;
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
/**
- * Usage Info
- * @description Usage information for this model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- usage_info: string | null;
+ is_intermediate?: boolean;
/**
- * Type
- * @default main
- * @constant
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- type: "main";
+ use_cache?: boolean;
/**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
+ * @description The image to process
+ * @default null
*/
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Format
- * @default api
- * @constant
+ * Max Faces
+ * @description Maximum number of faces to detect
+ * @default 1
*/
- format: "api";
+ max_faces?: number;
/**
- * Base
- * @default imagen4
+ * Min Confidence
+ * @description Minimum confidence for face detection
+ * @default 0.5
+ */
+ min_confidence?: number;
+ /**
+ * type
+ * @default mediapipe_face_detection
* @constant
*/
- base: "imagen4";
+ type: "mediapipe_face_detection";
};
/**
- * Main_GGUF_FLUX_Config
- * @description Model config for main checkpoint models.
+ * Metadata Merge
+ * @description Merged a collection of MetadataDict into a single MetadataDict.
*/
- Main_GGUF_FLUX_Config: {
+ MergeMetadataInvocation: {
/**
- * Key
- * @description A unique key for this model.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- key: string;
+ id: string;
/**
- * Hash
- * @description The hash of the model file(s).
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- hash: string;
+ is_intermediate?: boolean;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
+ use_cache?: boolean;
/**
- * Name
- * @description Name of the model.
+ * Collection
+ * @description Collection of Metadata
+ * @default null
*/
- name: string;
+ collection?: components["schemas"]["MetadataField"][] | null;
/**
- * Description
- * @description Model description
+ * type
+ * @default merge_metadata
+ * @constant
*/
- description: string | null;
+ type: "merge_metadata";
+ };
+ /**
+ * Merge Tiles to Image
+ * @description Merge multiple tile images into a single image.
+ */
+ MergeTilesToImageInvocation: {
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * @description The board to save the image to
+ * @default null
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- source_api_response: string | null;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Cover Image
- * @description Url for image to preview model
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- cover_image: string | null;
+ id: string;
/**
- * Usage Info
- * @description Usage information for this model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- usage_info: string | null;
+ is_intermediate?: boolean;
/**
- * Type
- * @default main
- * @constant
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- type: "main";
+ use_cache?: boolean;
/**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
+ * Tiles With Images
+ * @description A list of tile images with tile properties.
+ * @default null
*/
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ tiles_with_images?: components["schemas"]["TileWithImage"][] | null;
/**
- * Config Path
- * @description Path to the config for this model, if any.
+ * Blend Mode
+ * @description blending type Linear or Seam
+ * @default Seam
+ * @enum {string}
*/
- config_path: string | null;
+ blend_mode?: "Linear" | "Seam";
/**
- * Base
- * @default flux
- * @constant
+ * Blend Amount
+ * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles.
+ * @default 32
*/
- base: "flux";
+ blend_amount?: number;
/**
- * Format
- * @default gguf_quantized
+ * type
+ * @default merge_tiles_to_image
* @constant
*/
- format: "gguf_quantized";
- variant: components["schemas"]["FluxVariantType"];
+ type: "merge_tiles_to_image";
};
/**
- * Combine Masks
- * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.
+ * MetadataField
+ * @description Pydantic model for metadata with custom root of type dict[str, Any].
+ * Metadata is stored without a strict schema.
*/
- MaskCombineInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ MetadataField: Record;
+ /**
+ * Metadata Field Extractor
+ * @description Extracts the text value from an image's metadata given a key.
+ * Raises an error if the image has no metadata or if the value is not a string (nesting not permitted).
+ */
+ MetadataFieldExtractorInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -17515,37 +16984,28 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The first mask to combine
+ * @description The image to extract metadata from
* @default null
*/
- mask1?: components["schemas"]["ImageField"] | null;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * @description The second image to combine
+ * Key
+ * @description The key in the image's metadata to extract the value from
* @default null
*/
- mask2?: components["schemas"]["ImageField"] | null;
+ key?: string | null;
/**
* type
- * @default mask_combine
+ * @default metadata_field_extractor
* @constant
*/
- type: "mask_combine";
+ type: "metadata_field_extractor";
};
/**
- * Mask Edge
- * @description Applies an edge mask to an image
+ * Metadata From Image
+ * @description Used to create a core metadata item then Add/Update it to the provided metadata
*/
- MaskEdgeInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ MetadataFromImageInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -17564,56 +17024,70 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to apply the mask to
+ * @description The image to process
* @default null
*/
image?: components["schemas"]["ImageField"] | null;
/**
- * Edge Size
- * @description The size of the edge
- * @default null
+ * type
+ * @default metadata_from_image
+ * @constant
*/
- edge_size?: number | null;
+ type: "metadata_from_image";
+ };
+ /**
+ * Metadata
+ * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict.
+ */
+ MetadataInvocation: {
/**
- * Edge Blur
- * @description The amount of blur on the edge
- * @default null
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- edge_blur?: number | null;
+ id: string;
/**
- * Low Threshold
- * @description First threshold for the hysteresis procedure in Canny edge detection
- * @default null
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- low_threshold?: number | null;
+ is_intermediate?: boolean;
/**
- * High Threshold
- * @description Second threshold for the hysteresis procedure in Canny edge detection
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Items
+ * @description A single metadata item or collection of metadata items
* @default null
*/
- high_threshold?: number | null;
+ items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null;
/**
* type
- * @default mask_edge
+ * @default metadata
* @constant
*/
- type: "mask_edge";
+ type: "metadata";
};
- /**
- * Mask from Alpha
- * @description Extracts the alpha channel of an image as a mask.
- */
- MaskFromAlphaInvocation: {
+ /** MetadataItemField */
+ MetadataItemField: {
/**
- * @description The board to save the image to
- * @default null
+ * Label
+ * @description Label for this metadata item
*/
- board?: components["schemas"]["BoardField"] | null;
+ label: string;
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Value
+ * @description The value for this metadata item (may be any type)
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ value: unknown;
+ };
+ /**
+ * Metadata Item
+ * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value.
+ */
+ MetadataItemInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -17632,33 +17106,29 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to create the mask from
+ * Label
+ * @description Label for this metadata item
* @default null
*/
- image?: components["schemas"]["ImageField"] | null;
+ label?: string | null;
/**
- * Invert
- * @description Whether or not to invert the mask
- * @default false
+ * Value
+ * @description The value for this metadata item (may be any type)
+ * @default null
*/
- invert?: boolean;
+ value?: unknown | null;
/**
* type
- * @default tomask
+ * @default metadata_item
* @constant
*/
- type: "tomask";
+ type: "metadata_item";
};
/**
- * Mask from Segmented Image
- * @description Generate a mask for a particular color in an ID Map
+ * Metadata Item Linked
+ * @description Used to Create/Add/Update a value into a metadata label
*/
- MaskFromIDInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
+ MetadataItemLinkedInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -17682,68 +17152,61 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to create the mask from
- * @default null
+ * Label
+ * @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
*/
- image?: components["schemas"]["ImageField"] | null;
+ label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt" | "width" | "height" | "seed" | "cfg_scale" | "cfg_rescale_multiplier" | "steps" | "scheduler" | "clip_skip" | "model" | "vae" | "seamless_x" | "seamless_y" | "guidance" | "cfg_scale_start_step" | "cfg_scale_end_step";
/**
- * @description ID color to mask
+ * Custom Label
+ * @description Label for this metadata item
* @default null
*/
- color?: components["schemas"]["ColorField"] | null;
+ custom_label?: string | null;
/**
- * Threshold
- * @description Threshold for color detection
- * @default 100
- */
- threshold?: number;
- /**
- * Invert
- * @description Whether or not to invert the mask
- * @default false
+ * Value
+ * @description The value for this metadata item (may be any type)
+ * @default null
*/
- invert?: boolean;
+ value?: unknown | null;
/**
* type
- * @default mask_from_id
+ * @default metadata_item_linked
* @constant
*/
- type: "mask_from_id";
+ type: "metadata_item_linked";
};
/**
- * MaskOutput
- * @description A torch mask tensor.
+ * MetadataItemOutput
+ * @description Metadata Item Output
*/
- MaskOutput: {
- /** @description The mask. */
- mask: components["schemas"]["TensorField"];
- /**
- * Width
- * @description The width of the mask in pixels.
- */
- width: number;
+ MetadataItemOutput: {
+ /** @description Metadata Item */
+ item: components["schemas"]["MetadataItemField"];
/**
- * Height
- * @description The height of the mask in pixels.
+ * type
+ * @default metadata_item_output
+ * @constant
*/
- height: number;
+ type: "metadata_item_output";
+ };
+ /** MetadataOutput */
+ MetadataOutput: {
+ /** @description Metadata Dict */
+ metadata: components["schemas"]["MetadataField"];
/**
* type
- * @default mask_output
+ * @default metadata_output
* @constant
*/
- type: "mask_output";
+ type: "metadata_output";
};
/**
- * Tensor Mask to Image
- * @description Convert a mask tensor to an image.
+ * Metadata To Bool Collection
+ * @description Extracts a Boolean value Collection of a label from metadata
*/
- MaskTensorToImageInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
+ MetadataToBoolCollectionInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -17767,27 +17230,36 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The mask tensor to convert.
+ * Label
+ * @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
+ */
+ label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y";
+ /**
+ * Custom Label
+ * @description Label for this metadata item
* @default null
*/
- mask?: components["schemas"]["TensorField"] | null;
+ custom_label?: string | null;
+ /**
+ * Default Value
+ * @description The default bool to use if not found in the metadata
+ * @default null
+ */
+ default_value?: boolean[] | null;
/**
* type
- * @default tensor_mask_to_image
+ * @default metadata_to_bool_collection
* @constant
*/
- type: "tensor_mask_to_image";
+ type: "metadata_to_bool_collection";
};
/**
- * MediaPipe Face Detection
- * @description Detects faces using MediaPipe.
+ * Metadata To Bool
+ * @description Extracts a Boolean value of a label from metadata
*/
- MediaPipeFaceDetectionInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
+ MetadataToBoolInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -17811,34 +17283,41 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to process
- * @default null
+ * Label
+ * @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
*/
- image?: components["schemas"]["ImageField"] | null;
+ label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y";
/**
- * Max Faces
- * @description Maximum number of faces to detect
- * @default 1
+ * Custom Label
+ * @description Label for this metadata item
+ * @default null
*/
- max_faces?: number;
+ custom_label?: string | null;
/**
- * Min Confidence
- * @description Minimum confidence for face detection
- * @default 0.5
+ * Default Value
+ * @description The default bool to use if not found in the metadata
+ * @default null
*/
- min_confidence?: number;
+ default_value?: boolean | null;
/**
* type
- * @default mediapipe_face_detection
+ * @default metadata_to_bool
* @constant
*/
- type: "mediapipe_face_detection";
+ type: "metadata_to_bool";
};
/**
- * Metadata Merge
- * @description Merged a collection of MetadataDict into a single MetadataDict.
+ * Metadata To ControlNets
+ * @description Extracts a Controlnets value of a label from metadata
*/
- MergeMetadataInvocation: {
+ MetadataToControlnetsInvocation: {
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -17857,28 +17336,22 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Collection
- * @description Collection of Metadata
+ * ControlNet-List
* @default null
*/
- collection?: components["schemas"]["MetadataField"][] | null;
+ control_list?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null;
/**
* type
- * @default merge_metadata
+ * @default metadata_to_controlnets
* @constant
*/
- type: "merge_metadata";
+ type: "metadata_to_controlnets";
};
/**
- * Merge Tiles to Image
- * @description Merge multiple tile images into a single image.
+ * Metadata To Float Collection
+ * @description Extracts a Float value Collection of a label from metadata
*/
- MergeTilesToImageInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
+ MetadataToFloatCollectionInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -17902,43 +17375,41 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Tiles With Images
- * @description A list of tile images with tile properties.
- * @default null
+ * Label
+ * @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
*/
- tiles_with_images?: components["schemas"]["TileWithImage"][] | null;
+ label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance";
/**
- * Blend Mode
- * @description blending type Linear or Seam
- * @default Seam
- * @enum {string}
+ * Custom Label
+ * @description Label for this metadata item
+ * @default null
*/
- blend_mode?: "Linear" | "Seam";
+ custom_label?: string | null;
/**
- * Blend Amount
- * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles.
- * @default 32
+ * Default Value
+ * @description The default float to use if not found in the metadata
+ * @default null
*/
- blend_amount?: number;
+ default_value?: number[] | null;
/**
* type
- * @default merge_tiles_to_image
+ * @default metadata_to_float_collection
* @constant
*/
- type: "merge_tiles_to_image";
+ type: "metadata_to_float_collection";
};
/**
- * MetadataField
- * @description Pydantic model for metadata with custom root of type dict[str, Any].
- * Metadata is stored without a strict schema.
- */
- MetadataField: Record;
- /**
- * Metadata Field Extractor
- * @description Extracts the text value from an image's metadata given a key.
- * Raises an error if the image has no metadata or if the value is not a string (nesting not permitted).
+ * Metadata To Float
+ * @description Extracts a Float value of a label from metadata
*/
- MetadataFieldExtractorInvocation: {
+ MetadataToFloatInvocation: {
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -17957,28 +17428,41 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to extract metadata from
+ * Label
+ * @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
+ */
+ label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance";
+ /**
+ * Custom Label
+ * @description Label for this metadata item
* @default null
*/
- image?: components["schemas"]["ImageField"] | null;
+ custom_label?: string | null;
/**
- * Key
- * @description The key in the image's metadata to extract the value from
+ * Default Value
+ * @description The default float to use if not found in the metadata
* @default null
*/
- key?: string | null;
+ default_value?: number | null;
/**
* type
- * @default metadata_field_extractor
+ * @default metadata_to_float
* @constant
*/
- type: "metadata_field_extractor";
+ type: "metadata_to_float";
};
/**
- * Metadata From Image
- * @description Used to create a core metadata item then Add/Update it to the provided metadata
+ * Metadata To IP-Adapters
+ * @description Extracts a IP-Adapters value of a label from metadata
*/
- MetadataFromImageInvocation: {
+ MetadataToIPAdaptersInvocation: {
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -17997,22 +17481,28 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to process
+ * IP-Adapter-List
+ * @description IP-Adapter to apply
* @default null
*/
- image?: components["schemas"]["ImageField"] | null;
+ ip_adapter_list?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null;
/**
* type
- * @default metadata_from_image
+ * @default metadata_to_ip_adapters
* @constant
*/
- type: "metadata_from_image";
+ type: "metadata_to_ip_adapters";
};
/**
- * Metadata
- * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict.
+ * Metadata To Integer Collection
+ * @description Extracts an integer value Collection of a label from metadata
*/
- MetadataInvocation: {
+ MetadataToIntegerCollectionInvocation: {
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -18031,36 +17521,41 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Items
- * @description A single metadata item or collection of metadata items
- * @default null
+ * Label
+ * @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
*/
- items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null;
+ label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step";
/**
- * type
- * @default metadata
- * @constant
+ * Custom Label
+ * @description Label for this metadata item
+ * @default null
*/
- type: "metadata";
- };
- /** MetadataItemField */
- MetadataItemField: {
+ custom_label?: string | null;
/**
- * Label
- * @description Label for this metadata item
+ * Default Value
+ * @description The default integer to use if not found in the metadata
+ * @default null
*/
- label: string;
+ default_value?: number[] | null;
/**
- * Value
- * @description The value for this metadata item (may be any type)
+ * type
+ * @default metadata_to_integer_collection
+ * @constant
*/
- value: unknown;
+ type: "metadata_to_integer_collection";
};
/**
- * Metadata Item
- * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value.
+ * Metadata To Integer
+ * @description Extracts an integer value of a label from metadata
*/
- MetadataItemInvocation: {
+ MetadataToIntegerInvocation: {
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -18081,27 +17576,34 @@ export type components = {
/**
* Label
* @description Label for this metadata item
+ * @default * CUSTOM LABEL *
+ * @enum {string}
+ */
+ label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step";
+ /**
+ * Custom Label
+ * @description Label for this metadata item
* @default null
*/
- label?: string | null;
+ custom_label?: string | null;
/**
- * Value
- * @description The value for this metadata item (may be any type)
+ * Default Value
+ * @description The default integer to use if not found in the metadata
* @default null
*/
- value?: unknown | null;
+ default_value?: number | null;
/**
* type
- * @default metadata_item
+ * @default metadata_to_integer
* @constant
*/
- type: "metadata_item";
+ type: "metadata_to_integer";
};
/**
- * Metadata Item Linked
- * @description Used to Create/Add/Update a value into a metadata label
+ * Metadata To LoRA Collection
+ * @description Extracts Lora(s) from metadata into a collection
*/
- MetadataItemLinkedInvocation: {
+ MetadataToLorasCollectionInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18124,62 +17626,47 @@ export type components = {
* @default true
*/
use_cache?: boolean;
- /**
- * Label
- * @description Label for this metadata item
- * @default * CUSTOM LABEL *
- * @enum {string}
- */
- label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt" | "width" | "height" | "seed" | "cfg_scale" | "cfg_rescale_multiplier" | "steps" | "scheduler" | "clip_skip" | "model" | "vae" | "seamless_x" | "seamless_y" | "guidance" | "cfg_scale_start_step" | "cfg_scale_end_step";
/**
* Custom Label
* @description Label for this metadata item
- * @default null
+ * @default loras
*/
- custom_label?: string | null;
+ custom_label?: string;
/**
- * Value
- * @description The value for this metadata item (may be any type)
- * @default null
+ * LoRAs
+ * @description LoRA models and weights. May be a single LoRA or collection.
+ * @default []
*/
- value?: unknown | null;
+ loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null;
/**
* type
- * @default metadata_item_linked
+ * @default metadata_to_lora_collection
* @constant
*/
- type: "metadata_item_linked";
+ type: "metadata_to_lora_collection";
};
/**
- * MetadataItemOutput
- * @description Metadata Item Output
+ * MetadataToLorasCollectionOutput
+ * @description Model loader output
*/
- MetadataItemOutput: {
- /** @description Metadata Item */
- item: components["schemas"]["MetadataItemField"];
+ MetadataToLorasCollectionOutput: {
/**
- * type
- * @default metadata_item_output
- * @constant
+ * LoRAs
+ * @description Collection of LoRA model and weights
*/
- type: "metadata_item_output";
- };
- /** MetadataOutput */
- MetadataOutput: {
- /** @description Metadata Dict */
- metadata: components["schemas"]["MetadataField"];
+ lora: components["schemas"]["LoRAField"][];
/**
* type
- * @default metadata_output
+ * @default metadata_to_lora_collection_output
* @constant
*/
- type: "metadata_output";
+ type: "metadata_to_lora_collection_output";
};
/**
- * Metadata To Bool Collection
- * @description Extracts a Boolean value Collection of a label from metadata
+ * Metadata To LoRAs
+ * @description Extracts a Loras value of a label from metadata
*/
- MetadataToBoolCollectionInvocation: {
+ MetadataToLorasInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18203,36 +17690,29 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Label
- * @description Label for this metadata item
- * @default * CUSTOM LABEL *
- * @enum {string}
- */
- label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y";
- /**
- * Custom Label
- * @description Label for this metadata item
+ * UNet
+ * @description UNet (scheduler, LoRAs)
* @default null
*/
- custom_label?: string | null;
+ unet?: components["schemas"]["UNetField"] | null;
/**
- * Default Value
- * @description The default bool to use if not found in the metadata
+ * CLIP
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- default_value?: boolean[] | null;
+ clip?: components["schemas"]["CLIPField"] | null;
/**
* type
- * @default metadata_to_bool_collection
+ * @default metadata_to_loras
* @constant
*/
- type: "metadata_to_bool_collection";
+ type: "metadata_to_loras";
};
/**
- * Metadata To Bool
- * @description Extracts a Boolean value of a label from metadata
+ * Metadata To Model
+ * @description Extracts a Model value of a label from metadata
*/
- MetadataToBoolInvocation: {
+ MetadataToModelInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18258,10 +17738,10 @@ export type components = {
/**
* Label
* @description Label for this metadata item
- * @default * CUSTOM LABEL *
+ * @default model
* @enum {string}
*/
- label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y";
+ label?: "* CUSTOM LABEL *" | "model";
/**
* Custom Label
* @description Label for this metadata item
@@ -18269,62 +17749,59 @@ export type components = {
*/
custom_label?: string | null;
/**
- * Default Value
- * @description The default bool to use if not found in the metadata
+ * @description The default model to use if not found in the metadata
* @default null
*/
- default_value?: boolean | null;
+ default_value?: components["schemas"]["ModelIdentifierField"] | null;
/**
* type
- * @default metadata_to_bool
+ * @default metadata_to_model
* @constant
*/
- type: "metadata_to_bool";
+ type: "metadata_to_model";
};
/**
- * Metadata To ControlNets
- * @description Extracts a Controlnets value of a label from metadata
+ * MetadataToModelOutput
+ * @description String to main model output
*/
- MetadataToControlnetsInvocation: {
+ MetadataToModelOutput: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Model
+ * @description Main model (UNet, VAE, CLIP) to load
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ model: components["schemas"]["ModelIdentifierField"];
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Name
+ * @description Model Name
*/
- id: string;
+ name: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * UNet
+ * @description UNet (scheduler, LoRAs)
*/
- is_intermediate?: boolean;
+ unet: components["schemas"]["UNetField"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * VAE
+ * @description VAE
*/
- use_cache?: boolean;
+ vae: components["schemas"]["VAEField"];
/**
- * ControlNet-List
- * @default null
+ * CLIP
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
*/
- control_list?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null;
+ clip: components["schemas"]["CLIPField"];
/**
* type
- * @default metadata_to_controlnets
+ * @default metadata_to_model_output
* @constant
*/
- type: "metadata_to_controlnets";
+ type: "metadata_to_model_output";
};
/**
- * Metadata To Float Collection
- * @description Extracts a Float value Collection of a label from metadata
+ * Metadata To SDXL LoRAs
+ * @description Extracts a SDXL Loras value of a label from metadata
*/
- MetadataToFloatCollectionInvocation: {
+ MetadataToSDXLLorasInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18348,36 +17825,35 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Label
- * @description Label for this metadata item
- * @default * CUSTOM LABEL *
- * @enum {string}
+ * UNet
+ * @description UNet (scheduler, LoRAs)
+ * @default null
*/
- label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance";
+ unet?: components["schemas"]["UNetField"] | null;
/**
- * Custom Label
- * @description Label for this metadata item
+ * CLIP 1
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- custom_label?: string | null;
+ clip?: components["schemas"]["CLIPField"] | null;
/**
- * Default Value
- * @description The default float to use if not found in the metadata
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- default_value?: number[] | null;
+ clip2?: components["schemas"]["CLIPField"] | null;
/**
* type
- * @default metadata_to_float_collection
+ * @default metadata_to_sdlx_loras
* @constant
*/
- type: "metadata_to_float_collection";
+ type: "metadata_to_sdlx_loras";
};
/**
- * Metadata To Float
- * @description Extracts a Float value of a label from metadata
+ * Metadata To SDXL Model
+ * @description Extracts a SDXL Model value of a label from metadata
*/
- MetadataToFloatInvocation: {
+ MetadataToSDXLModelInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18403,10 +17879,10 @@ export type components = {
/**
* Label
* @description Label for this metadata item
- * @default * CUSTOM LABEL *
+ * @default model
* @enum {string}
*/
- label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance";
+ label?: "* CUSTOM LABEL *" | "model";
/**
* Custom Label
* @description Label for this metadata item
@@ -18414,63 +17890,64 @@ export type components = {
*/
custom_label?: string | null;
/**
- * Default Value
- * @description The default float to use if not found in the metadata
+ * @description The default SDXL Model to use if not found in the metadata
* @default null
*/
- default_value?: number | null;
+ default_value?: components["schemas"]["ModelIdentifierField"] | null;
/**
* type
- * @default metadata_to_float
+ * @default metadata_to_sdxl_model
* @constant
*/
- type: "metadata_to_float";
+ type: "metadata_to_sdxl_model";
};
/**
- * Metadata To IP-Adapters
- * @description Extracts a IP-Adapters value of a label from metadata
+ * MetadataToSDXLModelOutput
+ * @description String to SDXL main model output
*/
- MetadataToIPAdaptersInvocation: {
+ MetadataToSDXLModelOutput: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Model
+ * @description Main model (UNet, VAE, CLIP) to load
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ model: components["schemas"]["ModelIdentifierField"];
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Name
+ * @description Model Name
*/
- id: string;
+ name: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * UNet
+ * @description UNet (scheduler, LoRAs)
*/
- is_intermediate?: boolean;
+ unet: components["schemas"]["UNetField"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * CLIP 1
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
*/
- use_cache?: boolean;
+ clip: components["schemas"]["CLIPField"];
/**
- * IP-Adapter-List
- * @description IP-Adapter to apply
- * @default null
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
*/
- ip_adapter_list?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null;
+ clip2: components["schemas"]["CLIPField"];
+ /**
+ * VAE
+ * @description VAE
+ */
+ vae: components["schemas"]["VAEField"];
/**
* type
- * @default metadata_to_ip_adapters
+ * @default metadata_to_sdxl_model_output
* @constant
*/
- type: "metadata_to_ip_adapters";
+ type: "metadata_to_sdxl_model_output";
};
/**
- * Metadata To Integer Collection
- * @description Extracts an integer value Collection of a label from metadata
+ * Metadata To Scheduler
+ * @description Extracts a Scheduler value of a label from metadata
*/
- MetadataToIntegerCollectionInvocation: {
+ MetadataToSchedulerInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18496,10 +17973,10 @@ export type components = {
/**
* Label
* @description Label for this metadata item
- * @default * CUSTOM LABEL *
+ * @default scheduler
* @enum {string}
*/
- label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step";
+ label?: "* CUSTOM LABEL *" | "scheduler";
/**
* Custom Label
* @description Label for this metadata item
@@ -18508,22 +17985,23 @@ export type components = {
custom_label?: string | null;
/**
* Default Value
- * @description The default integer to use if not found in the metadata
- * @default null
+ * @description The default scheduler to use if not found in the metadata
+ * @default euler
+ * @enum {string}
*/
- default_value?: number[] | null;
+ default_value?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
/**
* type
- * @default metadata_to_integer_collection
+ * @default metadata_to_scheduler
* @constant
*/
- type: "metadata_to_integer_collection";
+ type: "metadata_to_scheduler";
};
/**
- * Metadata To Integer
- * @description Extracts an integer value of a label from metadata
+ * Metadata To String Collection
+ * @description Extracts a string collection value of a label from metadata
*/
- MetadataToIntegerInvocation: {
+ MetadataToStringCollectionInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18552,7 +18030,7 @@ export type components = {
* @default * CUSTOM LABEL *
* @enum {string}
*/
- label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step";
+ label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt";
/**
* Custom Label
* @description Label for this metadata item
@@ -18561,22 +18039,22 @@ export type components = {
custom_label?: string | null;
/**
* Default Value
- * @description The default integer to use if not found in the metadata
+ * @description The default string collection to use if not found in the metadata
* @default null
*/
- default_value?: number | null;
+ default_value?: string[] | null;
/**
* type
- * @default metadata_to_integer
+ * @default metadata_to_string_collection
* @constant
*/
- type: "metadata_to_integer";
+ type: "metadata_to_string_collection";
};
/**
- * Metadata To LoRA Collection
- * @description Extracts Lora(s) from metadata into a collection
+ * Metadata To String
+ * @description Extracts a string value of a label from metadata
*/
- MetadataToLorasCollectionInvocation: {
+ MetadataToStringInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18600,46 +18078,36 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Custom Label
+ * Label
* @description Label for this metadata item
- * @default loras
- */
- custom_label?: string;
- /**
- * LoRAs
- * @description LoRA models and weights. May be a single LoRA or collection.
- * @default []
+ * @default * CUSTOM LABEL *
+ * @enum {string}
*/
- loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null;
+ label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt";
/**
- * type
- * @default metadata_to_lora_collection
- * @constant
+ * Custom Label
+ * @description Label for this metadata item
+ * @default null
*/
- type: "metadata_to_lora_collection";
- };
- /**
- * MetadataToLorasCollectionOutput
- * @description Model loader output
- */
- MetadataToLorasCollectionOutput: {
+ custom_label?: string | null;
/**
- * LoRAs
- * @description Collection of LoRA model and weights
+ * Default Value
+ * @description The default string to use if not found in the metadata
+ * @default null
*/
- lora: components["schemas"]["LoRAField"][];
+ default_value?: string | null;
/**
* type
- * @default metadata_to_lora_collection_output
+ * @default metadata_to_string
* @constant
*/
- type: "metadata_to_lora_collection_output";
+ type: "metadata_to_string";
};
/**
- * Metadata To LoRAs
- * @description Extracts a Loras value of a label from metadata
+ * Metadata To T2I-Adapters
+ * @description Extracts a T2I-Adapters value of a label from metadata
*/
- MetadataToLorasInvocation: {
+ MetadataToT2IAdaptersInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18663,29 +18131,23 @@ export type components = {
*/
use_cache?: boolean;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
- * @default null
- */
- unet?: components["schemas"]["UNetField"] | null;
- /**
- * CLIP
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * T2I-Adapter
+ * @description IP-Adapter to apply
* @default null
*/
- clip?: components["schemas"]["CLIPField"] | null;
+ t2i_adapter_list?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null;
/**
* type
- * @default metadata_to_loras
+ * @default metadata_to_t2i_adapters
* @constant
*/
- type: "metadata_to_loras";
+ type: "metadata_to_t2i_adapters";
};
/**
- * Metadata To Model
- * @description Extracts a Model value of a label from metadata
+ * Metadata To VAE
+ * @description Extracts a VAE value of a label from metadata
*/
- MetadataToModelInvocation: {
+ MetadataToVAEInvocation: {
/**
* @description Optional metadata to be saved with the image
* @default null
@@ -18711,10 +18173,10 @@ export type components = {
/**
* Label
* @description Label for this metadata item
- * @default model
+ * @default vae
* @enum {string}
*/
- label?: "* CUSTOM LABEL *" | "model";
+ label?: "* CUSTOM LABEL *" | "vae";
/**
* Custom Label
* @description Label for this metadata item
@@ -18722,64 +18184,57 @@ export type components = {
*/
custom_label?: string | null;
/**
- * @description The default model to use if not found in the metadata
+ * @description The default VAE to use if not found in the metadata
* @default null
*/
- default_value?: components["schemas"]["ModelIdentifierField"] | null;
+ default_value?: components["schemas"]["VAEField"] | null;
/**
* type
- * @default metadata_to_model
+ * @default metadata_to_vae
* @constant
*/
- type: "metadata_to_model";
+ type: "metadata_to_vae";
};
/**
- * MetadataToModelOutput
- * @description String to main model output
+ * ModelFormat
+ * @description Storage format of model.
+ * @enum {string}
*/
- MetadataToModelOutput: {
- /**
- * Model
- * @description Main model (UNet, VAE, CLIP) to load
- */
- model: components["schemas"]["ModelIdentifierField"];
- /**
- * Name
- * @description Model Name
- */
- name: string;
+ ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "unknown";
+ /** ModelIdentifierField */
+ ModelIdentifierField: {
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * Key
+ * @description The model's unique key
*/
- unet: components["schemas"]["UNetField"];
+ key: string;
/**
- * VAE
- * @description VAE
+ * Hash
+ * @description The model's BLAKE3 hash
*/
- vae: components["schemas"]["VAEField"];
+ hash: string;
/**
- * CLIP
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Name
+ * @description The model's name
*/
- clip: components["schemas"]["CLIPField"];
+ name: string;
+ /** @description The model's base model type */
+ base: components["schemas"]["BaseModelType"];
+ /** @description The model's type */
+ type: components["schemas"]["ModelType"];
/**
- * type
- * @default metadata_to_model_output
- * @constant
+ * @description The submodel to load, if this is a main model
+ * @default null
*/
- type: "metadata_to_model_output";
+ submodel_type?: components["schemas"]["SubModelType"] | null;
};
/**
- * Metadata To SDXL LoRAs
- * @description Extracts a SDXL Loras value of a label from metadata
+ * Any Model
+ * @description Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
+ * input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an
+ * error.
*/
- MetadataToSDXLLorasInvocation: {
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ ModelIdentifierInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -18798,967 +18253,875 @@ export type components = {
*/
use_cache?: boolean;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
- * @default null
- */
- unet?: components["schemas"]["UNetField"] | null;
- /**
- * CLIP 1
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
- * @default null
- */
- clip?: components["schemas"]["CLIPField"] | null;
- /**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Model
+ * @description The model to select
* @default null
*/
- clip2?: components["schemas"]["CLIPField"] | null;
+ model?: components["schemas"]["ModelIdentifierField"] | null;
/**
* type
- * @default metadata_to_sdlx_loras
+ * @default model_identifier
* @constant
*/
- type: "metadata_to_sdlx_loras";
+ type: "model_identifier";
};
/**
- * Metadata To SDXL Model
- * @description Extracts a SDXL Model value of a label from metadata
+ * ModelIdentifierOutput
+ * @description Model identifier output
*/
- MetadataToSDXLModelInvocation: {
+ ModelIdentifierOutput: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Model
+ * @description Model identifier
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ model: components["schemas"]["ModelIdentifierField"];
+ /**
+ * type
+ * @default model_identifier_output
+ * @constant
+ */
+ type: "model_identifier_output";
+ };
+ /**
+ * ModelInstallCancelledEvent
+ * @description Event model for model_install_cancelled
+ */
+ ModelInstallCancelledEvent: {
+ /**
+ * Timestamp
+ * @description The timestamp of the event
+ */
+ timestamp: number;
/**
* Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * @description The ID of the install job
*/
- id: string;
+ id: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- is_intermediate?: boolean;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ };
+ /**
+ * ModelInstallCompleteEvent
+ * @description Event model for model_install_complete
+ */
+ ModelInstallCompleteEvent: {
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Timestamp
+ * @description The timestamp of the event
*/
- use_cache?: boolean;
+ timestamp: number;
/**
- * Label
- * @description Label for this metadata item
- * @default model
- * @enum {string}
+ * Id
+ * @description The ID of the install job
*/
- label?: "* CUSTOM LABEL *" | "model";
+ id: number;
/**
- * Custom Label
- * @description Label for this metadata item
- * @default null
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- custom_label?: string | null;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
/**
- * @description The default SDXL Model to use if not found in the metadata
- * @default null
+ * Key
+ * @description Model config record key
*/
- default_value?: components["schemas"]["ModelIdentifierField"] | null;
+ key: string;
/**
- * type
- * @default metadata_to_sdxl_model
- * @constant
+ * Total Bytes
+ * @description Size of the model (may be None for installation of a local path)
*/
- type: "metadata_to_sdxl_model";
+ total_bytes: number | null;
+ /**
+ * Config
+ * @description The installed model's config
+ */
+ config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
};
/**
- * MetadataToSDXLModelOutput
- * @description String to SDXL main model output
+ * ModelInstallDownloadProgressEvent
+ * @description Event model for model_install_download_progress
*/
- MetadataToSDXLModelOutput: {
+ ModelInstallDownloadProgressEvent: {
/**
- * Model
- * @description Main model (UNet, VAE, CLIP) to load
+ * Timestamp
+ * @description The timestamp of the event
*/
- model: components["schemas"]["ModelIdentifierField"];
+ timestamp: number;
/**
- * Name
- * @description Model Name
+ * Id
+ * @description The ID of the install job
*/
- name: string;
+ id: number;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- unet: components["schemas"]["UNetField"];
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
/**
- * CLIP 1
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Local Path
+ * @description Where model is downloading to
*/
- clip: components["schemas"]["CLIPField"];
+ local_path: string;
/**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Bytes
+ * @description Number of bytes downloaded so far
*/
- clip2: components["schemas"]["CLIPField"];
+ bytes: number;
/**
- * VAE
- * @description VAE
+ * Total Bytes
+ * @description Total size of download, including all files
*/
- vae: components["schemas"]["VAEField"];
+ total_bytes: number;
/**
- * type
- * @default metadata_to_sdxl_model_output
- * @constant
+ * Parts
+ * @description Progress of downloading URLs that comprise the model, if any
*/
- type: "metadata_to_sdxl_model_output";
+ parts: {
+ [key: string]: number | string;
+ }[];
};
/**
- * Metadata To Scheduler
- * @description Extracts a Scheduler value of a label from metadata
+ * ModelInstallDownloadStartedEvent
+ * @description Event model for model_install_download_started
*/
- MetadataToSchedulerInvocation: {
+ ModelInstallDownloadStartedEvent: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Timestamp
+ * @description The timestamp of the event
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ timestamp: number;
/**
* Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
- /**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * @description The ID of the install job
*/
- is_intermediate?: boolean;
+ id: number;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- use_cache?: boolean;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
/**
- * Label
- * @description Label for this metadata item
- * @default scheduler
- * @enum {string}
+ * Local Path
+ * @description Where model is downloading to
*/
- label?: "* CUSTOM LABEL *" | "scheduler";
+ local_path: string;
/**
- * Custom Label
- * @description Label for this metadata item
- * @default null
+ * Bytes
+ * @description Number of bytes downloaded so far
*/
- custom_label?: string | null;
+ bytes: number;
/**
- * Default Value
- * @description The default scheduler to use if not found in the metadata
- * @default euler
- * @enum {string}
+ * Total Bytes
+ * @description Total size of download, including all files
*/
- default_value?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
+ total_bytes: number;
/**
- * type
- * @default metadata_to_scheduler
- * @constant
+ * Parts
+ * @description Progress of downloading URLs that comprise the model, if any
*/
- type: "metadata_to_scheduler";
+ parts: {
+ [key: string]: number | string;
+ }[];
};
/**
- * Metadata To String Collection
- * @description Extracts a string collection value of a label from metadata
+ * ModelInstallDownloadsCompleteEvent
+ * @description Emitted once when an install job becomes active.
*/
- MetadataToStringCollectionInvocation: {
+ ModelInstallDownloadsCompleteEvent: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Timestamp
+ * @description The timestamp of the event
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ timestamp: number;
/**
* Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * @description The ID of the install job
*/
- id: string;
+ id: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- is_intermediate?: boolean;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ };
+ /**
+ * ModelInstallErrorEvent
+ * @description Event model for model_install_error
+ */
+ ModelInstallErrorEvent: {
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Timestamp
+ * @description The timestamp of the event
*/
- use_cache?: boolean;
+ timestamp: number;
/**
- * Label
- * @description Label for this metadata item
- * @default * CUSTOM LABEL *
- * @enum {string}
+ * Id
+ * @description The ID of the install job
*/
- label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt";
+ id: number;
/**
- * Custom Label
- * @description Label for this metadata item
- * @default null
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- custom_label?: string | null;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
/**
- * Default Value
- * @description The default string collection to use if not found in the metadata
- * @default null
+ * Error Type
+ * @description The name of the exception
*/
- default_value?: string[] | null;
+ error_type: string;
/**
- * type
- * @default metadata_to_string_collection
- * @constant
+ * Error
+ * @description A text description of the exception
*/
- type: "metadata_to_string_collection";
+ error: string;
};
/**
- * Metadata To String
- * @description Extracts a string value of a label from metadata
+ * ModelInstallJob
+ * @description Object that tracks the current status of an install request.
*/
- MetadataToStringInvocation: {
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ ModelInstallJob: {
/**
* Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * @description Unique ID for this job
*/
- id: string;
+ id: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * @description Current status of install process
+ * @default waiting
*/
- is_intermediate?: boolean;
+ status?: components["schemas"]["InstallStatus"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Error Reason
+ * @description Information about why the job failed
*/
- use_cache?: boolean;
+ error_reason?: string | null;
+ /** @description Configuration information (e.g. 'description') to apply to model. */
+ config_in?: components["schemas"]["ModelRecordChanges"];
/**
- * Label
- * @description Label for this metadata item
- * @default * CUSTOM LABEL *
- * @enum {string}
+ * Config Out
+ * @description After successful installation, this will hold the configuration object.
*/
- label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt";
+ config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null;
/**
- * Custom Label
- * @description Label for this metadata item
- * @default null
+ * Inplace
+ * @description Leave model in its current location; otherwise install under models directory
+ * @default false
*/
- custom_label?: string | null;
+ inplace?: boolean;
/**
- * Default Value
- * @description The default string to use if not found in the metadata
- * @default null
+ * Source
+ * @description Source (URL, repo_id, or local path) of model
*/
- default_value?: string | null;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
/**
- * type
- * @default metadata_to_string
- * @constant
+ * Local Path
+ * Format: path
+ * @description Path to locally-downloaded model; may be the same as the source
*/
- type: "metadata_to_string";
- };
- /**
- * Metadata To T2I-Adapters
- * @description Extracts a T2I-Adapters value of a label from metadata
- */
- MetadataToT2IAdaptersInvocation: {
+ local_path: string;
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Bytes
+ * @description For a remote model, the number of bytes downloaded so far (may not be available)
+ * @default 0
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ bytes?: number;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Total Bytes
+ * @description Total size of the model to be installed
+ * @default 0
*/
- id: string;
+ total_bytes?: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Source Metadata
+ * @description Metadata provided by the model source
*/
- is_intermediate?: boolean;
+ source_metadata?: (components["schemas"]["BaseMetadata"] | components["schemas"]["HuggingFaceMetadata"]) | null;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Download Parts
+ * @description Download jobs contributing to this install
*/
- use_cache?: boolean;
+ download_parts?: components["schemas"]["DownloadJob"][];
/**
- * T2I-Adapter
- * @description IP-Adapter to apply
- * @default null
+ * Error
+ * @description On an error condition, this field will contain the text of the exception
*/
- t2i_adapter_list?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null;
+ error?: string | null;
/**
- * type
- * @default metadata_to_t2i_adapters
- * @constant
+ * Error Traceback
+ * @description On an error condition, this field will contain the exception traceback
*/
- type: "metadata_to_t2i_adapters";
+ error_traceback?: string | null;
};
/**
- * Metadata To VAE
- * @description Extracts a VAE value of a label from metadata
+ * ModelInstallStartedEvent
+ * @description Event model for model_install_started
*/
- MetadataToVAEInvocation: {
+ ModelInstallStartedEvent: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Timestamp
+ * @description The timestamp of the event
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ timestamp: number;
/**
* Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
- /**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * @description The ID of the install job
*/
- is_intermediate?: boolean;
+ id: number;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Source
+ * @description Source of the model; local path, repo_id or url
*/
- use_cache?: boolean;
+ source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ };
+ /**
+ * ModelLoadCompleteEvent
+ * @description Event model for model_load_complete
+ */
+ ModelLoadCompleteEvent: {
/**
- * Label
- * @description Label for this metadata item
- * @default vae
- * @enum {string}
+ * Timestamp
+ * @description The timestamp of the event
*/
- label?: "* CUSTOM LABEL *" | "vae";
+ timestamp: number;
/**
- * Custom Label
- * @description Label for this metadata item
- * @default null
+ * Config
+ * @description The model's config
*/
- custom_label?: string | null;
+ config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
/**
- * @description The default VAE to use if not found in the metadata
+ * @description The submodel type, if any
* @default null
*/
- default_value?: components["schemas"]["VAEField"] | null;
- /**
- * type
- * @default metadata_to_vae
- * @constant
- */
- type: "metadata_to_vae";
+ submodel_type: components["schemas"]["SubModelType"] | null;
};
/**
- * ModelFormat
- * @description Storage format of model.
- * @enum {string}
+ * ModelLoadStartedEvent
+ * @description Event model for model_load_started
*/
- ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api" | "unknown";
- /** ModelIdentifierField */
- ModelIdentifierField: {
- /**
- * Key
- * @description The model's unique key
- */
- key: string;
+ ModelLoadStartedEvent: {
/**
- * Hash
- * @description The model's BLAKE3 hash
+ * Timestamp
+ * @description The timestamp of the event
*/
- hash: string;
+ timestamp: number;
/**
- * Name
- * @description The model's name
+ * Config
+ * @description The model's config
*/
- name: string;
- /** @description The model's base model type */
- base: components["schemas"]["BaseModelType"];
- /** @description The model's type */
- type: components["schemas"]["ModelType"];
+ config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
/**
- * @description The submodel to load, if this is a main model
+ * @description The submodel type, if any
* @default null
*/
- submodel_type?: components["schemas"]["SubModelType"] | null;
+ submodel_type: components["schemas"]["SubModelType"] | null;
};
/**
- * Any Model
- * @description Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
- * input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an
- * error.
+ * ModelLoaderOutput
+ * @description Model loader output
*/
- ModelIdentifierInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
+ ModelLoaderOutput: {
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * VAE
+ * @description VAE
*/
- is_intermediate?: boolean;
+ vae: components["schemas"]["VAEField"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * type
+ * @default model_loader_output
+ * @constant
*/
- use_cache?: boolean;
+ type: "model_loader_output";
/**
- * Model
- * @description The model to select
- * @default null
+ * CLIP
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
*/
- model?: components["schemas"]["ModelIdentifierField"] | null;
+ clip: components["schemas"]["CLIPField"];
/**
- * type
- * @default model_identifier
- * @constant
+ * UNet
+ * @description UNet (scheduler, LoRAs)
*/
- type: "model_identifier";
+ unet: components["schemas"]["UNetField"];
};
/**
- * ModelIdentifierOutput
- * @description Model identifier output
+ * ModelRecordChanges
+ * @description A set of changes to apply to a model.
*/
- ModelIdentifierOutput: {
+ ModelRecordChanges: {
/**
- * Model
- * @description Model identifier
+ * Source
+ * @description original source of the model
*/
- model: components["schemas"]["ModelIdentifierField"];
+ source?: string | null;
+ /** @description type of model source */
+ source_type?: components["schemas"]["ModelSourceType"] | null;
/**
- * type
- * @default model_identifier_output
- * @constant
- */
- type: "model_identifier_output";
- };
- /**
- * ModelInstallCancelledEvent
- * @description Event model for model_install_cancelled
- */
- ModelInstallCancelledEvent: {
- /**
- * Timestamp
- * @description The timestamp of the event
+ * Source Api Response
+ * @description metadata from remote source
*/
- timestamp: number;
+ source_api_response?: string | null;
/**
- * Id
- * @description The ID of the install job
+ * Name
+ * @description Name of the model.
*/
- id: number;
+ name?: string | null;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Path
+ * @description Path to the model.
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
- };
- /**
- * ModelInstallCompleteEvent
- * @description Event model for model_install_complete
- */
- ModelInstallCompleteEvent: {
+ path?: string | null;
/**
- * Timestamp
- * @description The timestamp of the event
+ * Description
+ * @description Model description
*/
- timestamp: number;
+ description?: string | null;
+ /** @description The base model. */
+ base?: components["schemas"]["BaseModelType"] | null;
+ /** @description Type of model */
+ type?: components["schemas"]["ModelType"] | null;
/**
- * Id
- * @description The ID of the install job
+ * Key
+ * @description Database ID for this model
*/
- id: number;
+ key?: string | null;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Hash
+ * @description hash of model file
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ hash?: string | null;
/**
- * Key
- * @description Model config record key
+ * File Size
+ * @description Size of model file
*/
- key: string;
+ file_size?: number | null;
/**
- * Total Bytes
- * @description Size of the model (may be None for installation of a local path)
+ * Format
+ * @description format of model file
*/
- total_bytes: number | null;
+ format?: string | null;
/**
- * Config
- * @description The installed model's config
+ * Trigger Phrases
+ * @description Set of trigger phrases for this model
*/
- config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
- };
- /**
- * ModelInstallDownloadProgressEvent
- * @description Event model for model_install_download_progress
- */
- ModelInstallDownloadProgressEvent: {
+ trigger_phrases?: string[] | null;
/**
- * Timestamp
- * @description The timestamp of the event
+ * Default Settings
+ * @description Default settings for this model
*/
- timestamp: number;
+ default_settings?: components["schemas"]["MainModelDefaultSettings"] | components["schemas"]["LoraModelDefaultSettings"] | components["schemas"]["ControlAdapterDefaultSettings"] | null;
/**
- * Id
- * @description The ID of the install job
+ * Variant
+ * @description The variant of the model.
*/
- id: number;
+ variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null;
+ /** @description The prediction type of the model. */
+ prediction_type?: components["schemas"]["SchedulerPredictionType"] | null;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Upcast Attention
+ * @description Whether to upcast attention.
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ upcast_attention?: boolean | null;
/**
- * Local Path
- * @description Where model is downloading to
+ * Config Path
+ * @description Path to config file for model
*/
- local_path: string;
+ config_path?: string | null;
+ };
+ /** ModelRelationshipBatchRequest */
+ ModelRelationshipBatchRequest: {
/**
- * Bytes
- * @description Number of bytes downloaded so far
+ * Model Keys
+ * @description List of model keys to fetch related models for
*/
- bytes: number;
+ model_keys: string[];
+ };
+ /** ModelRelationshipCreateRequest */
+ ModelRelationshipCreateRequest: {
/**
- * Total Bytes
- * @description Total size of download, including all files
+ * Model Key 1
+ * @description The key of the first model in the relationship
*/
- total_bytes: number;
+ model_key_1: string;
/**
- * Parts
- * @description Progress of downloading URLs that comprise the model, if any
+ * Model Key 2
+ * @description The key of the second model in the relationship
*/
- parts: {
- [key: string]: number | string;
- }[];
+ model_key_2: string;
};
/**
- * ModelInstallDownloadStartedEvent
- * @description Event model for model_install_download_started
+ * ModelRepoVariant
+ * @description Various hugging face variants on the diffusers format.
+ * @enum {string}
*/
- ModelInstallDownloadStartedEvent: {
- /**
- * Timestamp
- * @description The timestamp of the event
- */
- timestamp: number;
+ ModelRepoVariant: "" | "fp16" | "fp32" | "onnx" | "openvino" | "flax";
+ /**
+ * ModelSourceType
+ * @description Model source type.
+ * @enum {string}
+ */
+ ModelSourceType: "path" | "url" | "hf_repo_id";
+ /**
+ * ModelType
+ * @description Model type.
+ * @enum {string}
+ */
+ ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "unknown";
+ /**
+ * ModelVariantType
+ * @description Variant type.
+ * @enum {string}
+ */
+ ModelVariantType: "normal" | "inpaint" | "depth";
+ /**
+ * ModelsList
+ * @description Return list of configs.
+ */
+ ModelsList: {
+ /** Models */
+ models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[];
+ };
+ /**
+ * Multiply Integers
+ * @description Multiplies two numbers
+ */
+ MultiplyInvocation: {
/**
* Id
- * @description The ID of the install job
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- id: number;
+ id: string;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ is_intermediate?: boolean;
/**
- * Local Path
- * @description Where model is downloading to
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- local_path: string;
+ use_cache?: boolean;
/**
- * Bytes
- * @description Number of bytes downloaded so far
+ * A
+ * @description The first number
+ * @default 0
*/
- bytes: number;
+ a?: number;
/**
- * Total Bytes
- * @description Total size of download, including all files
+ * B
+ * @description The second number
+ * @default 0
*/
- total_bytes: number;
+ b?: number;
/**
- * Parts
- * @description Progress of downloading URLs that comprise the model, if any
+ * type
+ * @default mul
+ * @constant
*/
- parts: {
- [key: string]: number | string;
- }[];
+ type: "mul";
};
- /**
- * ModelInstallDownloadsCompleteEvent
- * @description Emitted once when an install job becomes active.
- */
- ModelInstallDownloadsCompleteEvent: {
+ /** NodeFieldValue */
+ NodeFieldValue: {
/**
- * Timestamp
- * @description The timestamp of the event
+ * Node Path
+ * @description The node into which this batch data item will be substituted.
*/
- timestamp: number;
+ node_path: string;
/**
- * Id
- * @description The ID of the install job
+ * Field Name
+ * @description The field into which this batch data item will be substituted.
*/
- id: number;
+ field_name: string;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Value
+ * @description The value to substitute into the node/field.
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ value: string | number | components["schemas"]["ImageField"];
};
/**
- * ModelInstallErrorEvent
- * @description Event model for model_install_error
+ * Create Latent Noise
+ * @description Generates latent noise.
*/
- ModelInstallErrorEvent: {
- /**
- * Timestamp
- * @description The timestamp of the event
- */
- timestamp: number;
+ NoiseInvocation: {
/**
* Id
- * @description The ID of the install job
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- id: number;
+ id: string;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ is_intermediate?: boolean;
/**
- * Error Type
- * @description The name of the exception
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- error_type: string;
+ use_cache?: boolean;
/**
- * Error
- * @description A text description of the exception
+ * Seed
+ * @description Seed for random number generation
+ * @default 0
*/
- error: string;
- };
- /**
- * ModelInstallJob
- * @description Object that tracks the current status of an install request.
- */
- ModelInstallJob: {
+ seed?: number;
/**
- * Id
- * @description Unique ID for this job
+ * Width
+ * @description Width of output (px)
+ * @default 512
*/
- id: number;
+ width?: number;
/**
- * @description Current status of install process
- * @default waiting
+ * Height
+ * @description Height of output (px)
+ * @default 512
*/
- status?: components["schemas"]["InstallStatus"];
+ height?: number;
/**
- * Error Reason
- * @description Information about why the job failed
+ * Use Cpu
+ * @description Use CPU for noise generation (for reproducible results across platforms)
+ * @default true
*/
- error_reason?: string | null;
- /** @description Configuration information (e.g. 'description') to apply to model. */
- config_in?: components["schemas"]["ModelRecordChanges"];
+ use_cpu?: boolean;
/**
- * Config Out
- * @description After successful installation, this will hold the configuration object.
+ * type
+ * @default noise
+ * @constant
*/
- config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]) | null;
+ type: "noise";
+ };
+ /**
+ * NoiseOutput
+ * @description Invocation noise output
+ */
+ NoiseOutput: {
+ /** @description Noise tensor */
+ noise: components["schemas"]["LatentsField"];
/**
- * Inplace
- * @description Leave model in its current location; otherwise install under models directory
- * @default false
+ * Width
+ * @description Width of output (px)
*/
- inplace?: boolean;
+ width: number;
/**
- * Source
- * @description Source (URL, repo_id, or local path) of model
+ * Height
+ * @description Height of output (px)
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ height: number;
/**
- * Local Path
- * Format: path
- * @description Path to locally-downloaded model; may be the same as the source
+ * type
+ * @default noise_output
+ * @constant
*/
- local_path: string;
+ type: "noise_output";
+ };
+ /**
+ * Normal Map
+ * @description Generates a normal map.
+ */
+ NormalMapInvocation: {
/**
- * Bytes
- * @description For a remote model, the number of bytes downloaded so far (may not be available)
- * @default 0
+ * @description The board to save the image to
+ * @default null
*/
- bytes?: number;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Total Bytes
- * @description Total size of the model to be installed
- * @default 0
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- total_bytes?: number;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Source Metadata
- * @description Metadata provided by the model source
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- source_metadata?: (components["schemas"]["BaseMetadata"] | components["schemas"]["HuggingFaceMetadata"]) | null;
+ id: string;
/**
- * Download Parts
- * @description Download jobs contributing to this install
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- download_parts?: components["schemas"]["DownloadJob"][];
+ is_intermediate?: boolean;
/**
- * Error
- * @description On an error condition, this field will contain the text of the exception
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- error?: string | null;
+ use_cache?: boolean;
/**
- * Error Traceback
- * @description On an error condition, this field will contain the exception traceback
+ * @description The image to process
+ * @default null
*/
- error_traceback?: string | null;
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * type
+ * @default normal_map
+ * @constant
+ */
+ type: "normal_map";
};
- /**
- * ModelInstallStartedEvent
- * @description Event model for model_install_started
- */
- ModelInstallStartedEvent: {
+ /** OffsetPaginatedResults[BoardDTO] */
+ OffsetPaginatedResults_BoardDTO_: {
/**
- * Timestamp
- * @description The timestamp of the event
+ * Limit
+ * @description Limit of items to get
*/
- timestamp: number;
+ limit: number;
/**
- * Id
- * @description The ID of the install job
+ * Offset
+ * @description Offset from which to retrieve items
*/
- id: number;
+ offset: number;
/**
- * Source
- * @description Source of the model; local path, repo_id or url
+ * Total
+ * @description Total number of items in result
*/
- source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"];
+ total: number;
+ /**
+ * Items
+ * @description Items
+ */
+ items: components["schemas"]["BoardDTO"][];
};
- /**
- * ModelLoadCompleteEvent
- * @description Event model for model_load_complete
- */
- ModelLoadCompleteEvent: {
+ /** OffsetPaginatedResults[ImageDTO] */
+ OffsetPaginatedResults_ImageDTO_: {
/**
- * Timestamp
- * @description The timestamp of the event
+ * Limit
+ * @description Limit of items to get
*/
- timestamp: number;
+ limit: number;
/**
- * Config
- * @description The model's config
+ * Offset
+ * @description Offset from which to retrieve items
*/
- config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
+ offset: number;
/**
- * @description The submodel type, if any
- * @default null
+ * Total
+ * @description Total number of items in result
*/
- submodel_type: components["schemas"]["SubModelType"] | null;
+ total: number;
+ /**
+ * Items
+ * @description Items
+ */
+ items: components["schemas"]["ImageDTO"][];
};
/**
- * ModelLoadStartedEvent
- * @description Event model for model_load_started
+ * OutputFieldJSONSchemaExtra
+ * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor
+ * during schema parsing and UI rendering.
*/
- ModelLoadStartedEvent: {
- /**
- * Timestamp
- * @description The timestamp of the event
- */
- timestamp: number;
+ OutputFieldJSONSchemaExtra: {
+ field_kind: components["schemas"]["FieldKind"];
/**
- * Config
- * @description The model's config
+ * Ui Hidden
+ * @default false
*/
- config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
+ ui_hidden: boolean;
/**
- * @description The submodel type, if any
+ * Ui Order
* @default null
*/
- submodel_type: components["schemas"]["SubModelType"] | null;
+ ui_order: number | null;
+ /** @default null */
+ ui_type: components["schemas"]["UIType"] | null;
};
- /**
- * ModelLoaderOutput
- * @description Model loader output
- */
- ModelLoaderOutput: {
+ /** PaginatedResults[WorkflowRecordListItemWithThumbnailDTO] */
+ PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_: {
/**
- * VAE
- * @description VAE
+ * Page
+ * @description Current Page
*/
- vae: components["schemas"]["VAEField"];
+ page: number;
/**
- * type
- * @default model_loader_output
- * @constant
+ * Pages
+ * @description Total number of pages
*/
- type: "model_loader_output";
+ pages: number;
/**
- * CLIP
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Per Page
+ * @description Number of items per page
*/
- clip: components["schemas"]["CLIPField"];
+ per_page: number;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * Total
+ * @description Total number of items in result
*/
- unet: components["schemas"]["UNetField"];
+ total: number;
+ /**
+ * Items
+ * @description Items
+ */
+ items: components["schemas"]["WorkflowRecordListItemWithThumbnailDTO"][];
};
/**
- * ModelRecordChanges
- * @description A set of changes to apply to a model.
+ * Pair Tile with Image
+ * @description Pair an image with its tile properties.
*/
- ModelRecordChanges: {
- /**
- * Source
- * @description original source of the model
- */
- source?: string | null;
- /** @description type of model source */
- source_type?: components["schemas"]["ModelSourceType"] | null;
- /**
- * Source Api Response
- * @description metadata from remote source
- */
- source_api_response?: string | null;
- /**
- * Name
- * @description Name of the model.
- */
- name?: string | null;
- /**
- * Path
- * @description Path to the model.
- */
- path?: string | null;
- /**
- * Description
- * @description Model description
- */
- description?: string | null;
- /** @description The base model. */
- base?: components["schemas"]["BaseModelType"] | null;
- /** @description Type of model */
- type?: components["schemas"]["ModelType"] | null;
- /**
- * Key
- * @description Database ID for this model
- */
- key?: string | null;
- /**
- * Hash
- * @description hash of model file
- */
- hash?: string | null;
- /**
- * File Size
- * @description Size of model file
- */
- file_size?: number | null;
+ PairTileImageInvocation: {
/**
- * Format
- * @description format of model file
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- format?: string | null;
+ id: string;
/**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- trigger_phrases?: string[] | null;
+ is_intermediate?: boolean;
/**
- * Default Settings
- * @description Default settings for this model
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- default_settings?: components["schemas"]["MainModelDefaultSettings"] | components["schemas"]["LoraModelDefaultSettings"] | components["schemas"]["ControlAdapterDefaultSettings"] | null;
+ use_cache?: boolean;
/**
- * Variant
- * @description The variant of the model.
+ * @description The tile image.
+ * @default null
*/
- variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null;
- /** @description The prediction type of the model. */
- prediction_type?: components["schemas"]["SchedulerPredictionType"] | null;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Upcast Attention
- * @description Whether to upcast attention.
+ * @description The tile properties.
+ * @default null
*/
- upcast_attention?: boolean | null;
+ tile?: components["schemas"]["Tile"] | null;
/**
- * Config Path
- * @description Path to config file for model
+ * type
+ * @default pair_tile_image
+ * @constant
*/
- config_path?: string | null;
+ type: "pair_tile_image";
};
- /** ModelRelationshipBatchRequest */
- ModelRelationshipBatchRequest: {
+ /** PairTileImageOutput */
+ PairTileImageOutput: {
+ /** @description A tile description with its corresponding image. */
+ tile_with_image: components["schemas"]["TileWithImage"];
/**
- * Model Keys
- * @description List of model keys to fetch related models for
+ * type
+ * @default pair_tile_image_output
+ * @constant
*/
- model_keys: string[];
+ type: "pair_tile_image_output";
};
- /** ModelRelationshipCreateRequest */
- ModelRelationshipCreateRequest: {
+ /**
+ * Paste Image into Bounding Box
+ * @description Paste the source image into the target image at the given bounding box.
+ *
+ * The source image must be the same size as the bounding box, and the bounding box must fit within the target image.
+ */
+ PasteImageIntoBoundingBoxInvocation: {
/**
- * Model Key 1
- * @description The key of the first model in the relationship
+ * @description The board to save the image to
+ * @default null
*/
- model_key_1: string;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Model Key 2
- * @description The key of the second model in the relationship
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- model_key_2: string;
- };
- /**
- * ModelRepoVariant
- * @description Various hugging face variants on the diffusers format.
- * @enum {string}
- */
- ModelRepoVariant: "" | "fp16" | "fp32" | "onnx" | "openvino" | "flax";
- /**
- * ModelSourceType
- * @description Model source type.
- * @enum {string}
- */
- ModelSourceType: "path" | "url" | "hf_repo_id";
- /**
- * ModelType
- * @description Model type.
- * @enum {string}
- */
- ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video" | "unknown";
- /**
- * ModelVariantType
- * @description Variant type.
- * @enum {string}
- */
- ModelVariantType: "normal" | "inpaint" | "depth";
- /**
- * ModelsList
- * @description Return list of configs.
- */
- ModelsList: {
- /** Models */
- models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"])[];
- };
- /**
- * Multiply Integers
- * @description Multiplies two numbers
- */
- MultiplyInvocation: {
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -19777,47 +19140,42 @@ export type components = {
*/
use_cache?: boolean;
/**
- * A
- * @description The first number
- * @default 0
+ * @description The image to paste
+ * @default null
*/
- a?: number;
+ source_image?: components["schemas"]["ImageField"] | null;
/**
- * B
- * @description The second number
- * @default 0
+ * @description The image to paste into
+ * @default null
*/
- b?: number;
+ target_image?: components["schemas"]["ImageField"] | null;
+ /**
+ * @description The bounding box to paste the image into
+ * @default null
+ */
+ bounding_box?: components["schemas"]["BoundingBoxField"] | null;
/**
* type
- * @default mul
+ * @default paste_image_into_bounding_box
* @constant
*/
- type: "mul";
+ type: "paste_image_into_bounding_box";
};
- /** NodeFieldValue */
- NodeFieldValue: {
- /**
- * Node Path
- * @description The node into which this batch data item will be substituted.
- */
- node_path: string;
+ /**
+ * PiDiNet Edge Detection
+ * @description Generates an edge map using PiDiNet.
+ */
+ PiDiNetEdgeDetectionInvocation: {
/**
- * Field Name
- * @description The field into which this batch data item will be substituted.
+ * @description The board to save the image to
+ * @default null
*/
- field_name: string;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * Value
- * @description The value to substitute into the node/field.
+ * @description Optional metadata to be saved with the image
+ * @default null
*/
- value: string | number | components["schemas"]["ImageField"];
- };
- /**
- * Create Latent Noise
- * @description Generates latent noise.
- */
- NoiseInvocation: {
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -19836,75 +19194,73 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Seed
- * @description Seed for random number generation
- * @default 0
- */
- seed?: number;
- /**
- * Width
- * @description Width of output (px)
- * @default 512
+ * @description The image to process
+ * @default null
*/
- width?: number;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Height
- * @description Height of output (px)
- * @default 512
+ * Quantize Edges
+ * @description Whether or not to use safe mode
+ * @default false
*/
- height?: number;
+ quantize_edges?: boolean;
/**
- * Use Cpu
- * @description Use CPU for noise generation (for reproducible results across platforms)
- * @default true
+ * Scribble
+ * @description Whether or not to use scribble mode
+ * @default false
*/
- use_cpu?: boolean;
+ scribble?: boolean;
/**
* type
- * @default noise
+ * @default pidi_edge_detection
* @constant
*/
- type: "noise";
+ type: "pidi_edge_detection";
};
- /**
- * NoiseOutput
- * @description Invocation noise output
- */
- NoiseOutput: {
- /** @description Noise tensor */
- noise: components["schemas"]["LatentsField"];
- /**
+ /** PresetData */
+ PresetData: {
+ /**
+ * Positive Prompt
+ * @description Positive prompt
+ */
+ positive_prompt: string;
+ /**
+ * Negative Prompt
+ * @description Negative prompt
+ */
+ negative_prompt: string;
+ };
+ /**
+ * PresetType
+ * @enum {string}
+ */
+ PresetType: "user" | "default";
+ /**
+ * ProgressImage
+ * @description The progress image sent intermittently during processing
+ */
+ ProgressImage: {
+ /**
* Width
- * @description Width of output (px)
+ * @description The effective width of the image in pixels
*/
width: number;
/**
* Height
- * @description Height of output (px)
+ * @description The effective height of the image in pixels
*/
height: number;
/**
- * type
- * @default noise_output
- * @constant
+ * Dataurl
+ * @description The image data as a b64 data URL
*/
- type: "noise_output";
+ dataURL: string;
};
/**
- * Normal Map
- * @description Generates a normal map.
+ * Prompts from File
+ * @description Loads prompts from a text file
*/
- NormalMapInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ PromptsFromFileInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -19923,201 +19279,188 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to process
+ * File Path
+ * @description Path to prompt text file
* @default null
*/
- image?: components["schemas"]["ImageField"] | null;
+ file_path?: string | null;
/**
- * type
- * @default normal_map
- * @constant
+ * Pre Prompt
+ * @description String to prepend to each prompt
+ * @default null
*/
- type: "normal_map";
- };
- /** OffsetPaginatedResults[BoardDTO] */
- OffsetPaginatedResults_BoardDTO_: {
+ pre_prompt?: string | null;
/**
- * Limit
- * @description Limit of items to get
+ * Post Prompt
+ * @description String to append to each prompt
+ * @default null
*/
- limit: number;
+ post_prompt?: string | null;
/**
- * Offset
- * @description Offset from which to retrieve items
+ * Start Line
+ * @description Line in the file to start start from
+ * @default 1
*/
- offset: number;
+ start_line?: number;
/**
- * Total
- * @description Total number of items in result
+ * Max Prompts
+ * @description Max lines to read from file (0=all)
+ * @default 1
*/
- total: number;
+ max_prompts?: number;
/**
- * Items
- * @description Items
+ * type
+ * @default prompt_from_file
+ * @constant
*/
- items: components["schemas"]["BoardDTO"][];
+ type: "prompt_from_file";
};
- /** OffsetPaginatedResults[ImageDTO] */
- OffsetPaginatedResults_ImageDTO_: {
- /**
- * Limit
- * @description Limit of items to get
- */
- limit: number;
+ /**
+ * PruneResult
+ * @description Result of pruning the session queue
+ */
+ PruneResult: {
/**
- * Offset
- * @description Offset from which to retrieve items
+ * Deleted
+ * @description Number of queue items deleted
*/
- offset: number;
+ deleted: number;
+ };
+ /**
+ * QueueClearedEvent
+ * @description Event model for queue_cleared
+ */
+ QueueClearedEvent: {
/**
- * Total
- * @description Total number of items in result
+ * Timestamp
+ * @description The timestamp of the event
*/
- total: number;
+ timestamp: number;
/**
- * Items
- * @description Items
+ * Queue Id
+ * @description The ID of the queue
*/
- items: components["schemas"]["ImageDTO"][];
+ queue_id: string;
};
- /** OffsetPaginatedResults[VideoDTO] */
- OffsetPaginatedResults_VideoDTO_: {
- /**
- * Limit
- * @description Limit of items to get
- */
- limit: number;
+ /**
+ * QueueItemStatusChangedEvent
+ * @description Event model for queue_item_status_changed
+ */
+ QueueItemStatusChangedEvent: {
/**
- * Offset
- * @description Offset from which to retrieve items
+ * Timestamp
+ * @description The timestamp of the event
*/
- offset: number;
+ timestamp: number;
/**
- * Total
- * @description Total number of items in result
+ * Queue Id
+ * @description The ID of the queue
*/
- total: number;
+ queue_id: string;
/**
- * Items
- * @description Items
+ * Item Id
+ * @description The ID of the queue item
*/
- items: components["schemas"]["VideoDTO"][];
- };
- /**
- * OutputFieldJSONSchemaExtra
- * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor
- * during schema parsing and UI rendering.
- */
- OutputFieldJSONSchemaExtra: {
- field_kind: components["schemas"]["FieldKind"];
+ item_id: number;
/**
- * Ui Hidden
- * @default false
+ * Batch Id
+ * @description The ID of the queue batch
*/
- ui_hidden: boolean;
+ batch_id: string;
/**
- * Ui Order
+ * Origin
+ * @description The origin of the queue item
* @default null
*/
- ui_order: number | null;
- /** @default null */
- ui_type: components["schemas"]["UIType"] | null;
- };
- /** PaginatedResults[WorkflowRecordListItemWithThumbnailDTO] */
- PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_: {
- /**
- * Page
- * @description Current Page
- */
- page: number;
+ origin: string | null;
/**
- * Pages
- * @description Total number of pages
+ * Destination
+ * @description The destination of the queue item
+ * @default null
*/
- pages: number;
+ destination: string | null;
/**
- * Per Page
- * @description Number of items per page
+ * Status
+ * @description The new status of the queue item
+ * @enum {string}
*/
- per_page: number;
+ status: "pending" | "in_progress" | "completed" | "failed" | "canceled";
/**
- * Total
- * @description Total number of items in result
+ * Error Type
+ * @description The error type, if any
+ * @default null
*/
- total: number;
+ error_type: string | null;
/**
- * Items
- * @description Items
+ * Error Message
+ * @description The error message, if any
+ * @default null
*/
- items: components["schemas"]["WorkflowRecordListItemWithThumbnailDTO"][];
- };
- /**
- * Pair Tile with Image
- * @description Pair an image with its tile properties.
- */
- PairTileImageInvocation: {
+ error_message: string | null;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Error Traceback
+ * @description The error traceback, if any
+ * @default null
*/
- id: string;
+ error_traceback: string | null;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Created At
+ * @description The timestamp when the queue item was created
*/
- is_intermediate?: boolean;
+ created_at: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Updated At
+ * @description The timestamp when the queue item was last updated
*/
- use_cache?: boolean;
+ updated_at: string;
/**
- * @description The tile image.
+ * Started At
+ * @description The timestamp when the queue item was started
* @default null
*/
- image?: components["schemas"]["ImageField"] | null;
+ started_at: string | null;
/**
- * @description The tile properties.
+ * Completed At
+ * @description The timestamp when the queue item was completed
* @default null
*/
- tile?: components["schemas"]["Tile"] | null;
+ completed_at: string | null;
+ /** @description The status of the batch */
+ batch_status: components["schemas"]["BatchStatus"];
+ /** @description The status of the queue */
+ queue_status: components["schemas"]["SessionQueueStatus"];
/**
- * type
- * @default pair_tile_image
- * @constant
- */
- type: "pair_tile_image";
- };
- /** PairTileImageOutput */
- PairTileImageOutput: {
- /** @description A tile description with its corresponding image. */
- tile_with_image: components["schemas"]["TileWithImage"];
- /**
- * type
- * @default pair_tile_image_output
- * @constant
+ * Session Id
+ * @description The ID of the session (aka graph execution state)
*/
- type: "pair_tile_image_output";
+ session_id: string;
};
/**
- * Paste Image into Bounding Box
- * @description Paste the source image into the target image at the given bounding box.
- *
- * The source image must be the same size as the bounding box, and the bounding box must fit within the target image.
+ * QueueItemsRetriedEvent
+ * @description Event model for queue_items_retried
*/
- PasteImageIntoBoundingBoxInvocation: {
+ QueueItemsRetriedEvent: {
/**
- * @description The board to save the image to
- * @default null
+ * Timestamp
+ * @description The timestamp of the event
*/
- board?: components["schemas"]["BoardField"] | null;
+ timestamp: number;
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Queue Id
+ * @description The ID of the queue
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ queue_id: string;
+ /**
+ * Retried Item Ids
+ * @description The IDs of the queue items that were retried
+ */
+ retried_item_ids: number[];
+ };
+ /**
+ * Random Float
+ * @description Outputs a single random float
+ */
+ RandomFloatInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20132,46 +19475,39 @@ export type components = {
/**
* Use Cache
* @description Whether or not to use the cache
- * @default true
+ * @default false
*/
use_cache?: boolean;
/**
- * @description The image to paste
- * @default null
+ * Low
+ * @description The inclusive low value
+ * @default 0
*/
- source_image?: components["schemas"]["ImageField"] | null;
+ low?: number;
/**
- * @description The image to paste into
- * @default null
+ * High
+ * @description The exclusive high value
+ * @default 1
*/
- target_image?: components["schemas"]["ImageField"] | null;
+ high?: number;
/**
- * @description The bounding box to paste the image into
- * @default null
+ * Decimals
+ * @description The number of decimal places to round to
+ * @default 2
*/
- bounding_box?: components["schemas"]["BoundingBoxField"] | null;
+ decimals?: number;
/**
* type
- * @default paste_image_into_bounding_box
+ * @default rand_float
* @constant
*/
- type: "paste_image_into_bounding_box";
+ type: "rand_float";
};
/**
- * PiDiNet Edge Detection
- * @description Generates an edge map using PiDiNet.
+ * Random Integer
+ * @description Outputs a single random integer.
*/
- PiDiNetEdgeDetectionInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ RandomIntInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20186,77 +19522,86 @@ export type components = {
/**
* Use Cache
* @description Whether or not to use the cache
- * @default true
+ * @default false
*/
use_cache?: boolean;
/**
- * @description The image to process
- * @default null
- */
- image?: components["schemas"]["ImageField"] | null;
- /**
- * Quantize Edges
- * @description Whether or not to use safe mode
- * @default false
+ * Low
+ * @description The inclusive low value
+ * @default 0
*/
- quantize_edges?: boolean;
+ low?: number;
/**
- * Scribble
- * @description Whether or not to use scribble mode
- * @default false
+ * High
+ * @description The exclusive high value
+ * @default 2147483647
*/
- scribble?: boolean;
+ high?: number;
/**
* type
- * @default pidi_edge_detection
+ * @default rand_int
* @constant
*/
- type: "pidi_edge_detection";
+ type: "rand_int";
};
- /** PresetData */
- PresetData: {
+ /**
+ * Random Range
+ * @description Creates a collection of random numbers
+ */
+ RandomRangeInvocation: {
/**
- * Positive Prompt
- * @description Positive prompt
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- positive_prompt: string;
+ id: string;
/**
- * Negative Prompt
- * @description Negative prompt
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- negative_prompt: string;
- };
- /**
- * PresetType
- * @enum {string}
- */
- PresetType: "user" | "default" | "project";
- /**
- * ProgressImage
- * @description The progress image sent intermittently during processing
- */
- ProgressImage: {
+ is_intermediate?: boolean;
/**
- * Width
- * @description The effective width of the image in pixels
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default false
*/
- width: number;
+ use_cache?: boolean;
/**
- * Height
- * @description The effective height of the image in pixels
+ * Low
+ * @description The inclusive low value
+ * @default 0
*/
- height: number;
+ low?: number;
/**
- * Dataurl
- * @description The image data as a b64 data URL
+ * High
+ * @description The exclusive high value
+ * @default 2147483647
*/
- dataURL: string;
+ high?: number;
+ /**
+ * Size
+ * @description The number of values to generate
+ * @default 1
+ */
+ size?: number;
+ /**
+ * Seed
+ * @description The seed for the RNG (omit for random)
+ * @default 0
+ */
+ seed?: number;
+ /**
+ * type
+ * @default random_range
+ * @constant
+ */
+ type: "random_range";
};
/**
- * Prompts from File
- * @description Loads prompts from a text file
+ * Integer Range
+ * @description Creates a range of numbers from start to stop with step
*/
- PromptsFromFileInvocation: {
+ RangeInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20275,194 +19620,194 @@ export type components = {
*/
use_cache?: boolean;
/**
- * File Path
- * @description Path to prompt text file
- * @default null
- */
- file_path?: string | null;
- /**
- * Pre Prompt
- * @description String to prepend to each prompt
- * @default null
- */
- pre_prompt?: string | null;
- /**
- * Post Prompt
- * @description String to append to each prompt
- * @default null
+ * Start
+ * @description The start of the range
+ * @default 0
*/
- post_prompt?: string | null;
+ start?: number;
/**
- * Start Line
- * @description Line in the file to start start from
- * @default 1
+ * Stop
+ * @description The stop of the range
+ * @default 10
*/
- start_line?: number;
+ stop?: number;
/**
- * Max Prompts
- * @description Max lines to read from file (0=all)
+ * Step
+ * @description The step of the range
* @default 1
*/
- max_prompts?: number;
+ step?: number;
/**
* type
- * @default prompt_from_file
+ * @default range
* @constant
*/
- type: "prompt_from_file";
+ type: "range";
};
/**
- * PruneResult
- * @description Result of pruning the session queue
+ * Integer Range of Size
+ * @description Creates a range from start to start + (size * step) incremented by step
*/
- PruneResult: {
+ RangeOfSizeInvocation: {
/**
- * Deleted
- * @description Number of queue items deleted
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- deleted: number;
- };
- /**
- * QueueClearedEvent
- * @description Event model for queue_cleared
- */
- QueueClearedEvent: {
+ id: string;
/**
- * Timestamp
- * @description The timestamp of the event
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- timestamp: number;
+ is_intermediate?: boolean;
/**
- * Queue Id
- * @description The ID of the queue
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- queue_id: string;
- };
- /**
- * QueueItemStatusChangedEvent
- * @description Event model for queue_item_status_changed
- */
- QueueItemStatusChangedEvent: {
+ use_cache?: boolean;
/**
- * Timestamp
- * @description The timestamp of the event
+ * Start
+ * @description The start of the range
+ * @default 0
*/
- timestamp: number;
+ start?: number;
/**
- * Queue Id
- * @description The ID of the queue
+ * Size
+ * @description The number of values
+ * @default 1
*/
- queue_id: string;
+ size?: number;
/**
- * Item Id
- * @description The ID of the queue item
+ * Step
+ * @description The step of the range
+ * @default 1
*/
- item_id: number;
+ step?: number;
/**
- * Batch Id
- * @description The ID of the queue batch
+ * type
+ * @default range_of_size
+ * @constant
*/
- batch_id: string;
+ type: "range_of_size";
+ };
+ /**
+ * Create Rectangle Mask
+ * @description Create a rectangular mask.
+ */
+ RectangleMaskInvocation: {
/**
- * Origin
- * @description The origin of the queue item
+ * @description Optional metadata to be saved with the image
* @default null
*/
- origin: string | null;
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
- * Destination
- * @description The destination of the queue item
- * @default null
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- destination: string | null;
+ id: string;
/**
- * Status
- * @description The new status of the queue item
- * @enum {string}
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- status: "pending" | "in_progress" | "completed" | "failed" | "canceled";
+ is_intermediate?: boolean;
/**
- * Error Type
- * @description The error type, if any
- * @default null
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- error_type: string | null;
+ use_cache?: boolean;
/**
- * Error Message
- * @description The error message, if any
+ * Width
+ * @description The width of the entire mask.
* @default null
*/
- error_message: string | null;
+ width?: number | null;
/**
- * Error Traceback
- * @description The error traceback, if any
+ * Height
+ * @description The height of the entire mask.
* @default null
*/
- error_traceback: string | null;
- /**
- * Created At
- * @description The timestamp when the queue item was created
- */
- created_at: string;
+ height?: number | null;
/**
- * Updated At
- * @description The timestamp when the queue item was last updated
+ * X Left
+ * @description The left x-coordinate of the rectangular masked region (inclusive).
+ * @default null
*/
- updated_at: string;
+ x_left?: number | null;
/**
- * Started At
- * @description The timestamp when the queue item was started
+ * Y Top
+ * @description The top y-coordinate of the rectangular masked region (inclusive).
* @default null
*/
- started_at: string | null;
+ y_top?: number | null;
/**
- * Completed At
- * @description The timestamp when the queue item was completed
+ * Rectangle Width
+ * @description The width of the rectangular masked region.
* @default null
*/
- completed_at: string | null;
- /** @description The status of the batch */
- batch_status: components["schemas"]["BatchStatus"];
- /** @description The status of the queue */
- queue_status: components["schemas"]["SessionQueueStatus"];
+ rectangle_width?: number | null;
/**
- * Session Id
- * @description The ID of the session (aka graph execution state)
+ * Rectangle Height
+ * @description The height of the rectangular masked region.
+ * @default null
*/
- session_id: string;
+ rectangle_height?: number | null;
/**
- * Credits
- * @description The total credits used for this queue item
- * @default null
+ * type
+ * @default rectangle_mask
+ * @constant
*/
- credits: number | null;
+ type: "rectangle_mask";
};
/**
- * QueueItemsRetriedEvent
- * @description Event model for queue_items_retried
+ * RemoteModelFile
+ * @description Information about a downloadable file that forms part of a model.
*/
- QueueItemsRetriedEvent: {
+ RemoteModelFile: {
/**
- * Timestamp
- * @description The timestamp of the event
+ * Url
+ * Format: uri
+ * @description The url to download this model file
*/
- timestamp: number;
+ url: string;
/**
- * Queue Id
- * @description The ID of the queue
+ * Path
+ * Format: path
+ * @description The path to the file, relative to the model root
*/
- queue_id: string;
+ path: string;
/**
- * Retried Item Ids
- * @description The IDs of the queue items that were retried
+ * Size
+ * @description The size of this file, in bytes
+ * @default 0
*/
- retried_item_ids: number[];
+ size?: number | null;
+ /**
+ * Sha256
+ * @description SHA256 hash of this model (not always available)
+ */
+ sha256?: string | null;
+ };
+ /** RemoveImagesFromBoardResult */
+ RemoveImagesFromBoardResult: {
+ /**
+ * Affected Boards
+ * @description The ids of boards affected by the delete operation
+ */
+ affected_boards: string[];
+ /**
+ * Removed Images
+ * @description The image names that were removed from their board
+ */
+ removed_images: string[];
};
/**
- * Random Float
- * @description Outputs a single random float
+ * Resize Latents
+ * @description Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8.
*/
- RandomFloatInvocation: {
+ ResizeLatentsInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20477,39 +19822,74 @@ export type components = {
/**
* Use Cache
* @description Whether or not to use the cache
- * @default false
+ * @default true
*/
use_cache?: boolean;
/**
- * Low
- * @description The inclusive low value
- * @default 0
+ * @description Latents tensor
+ * @default null
*/
- low?: number;
+ latents?: components["schemas"]["LatentsField"] | null;
/**
- * High
- * @description The exclusive high value
- * @default 1
+ * Width
+ * @description Width of output (px)
+ * @default null
*/
- high?: number;
+ width?: number | null;
/**
- * Decimals
- * @description The number of decimal places to round to
- * @default 2
+ * Height
+ * @description Width of output (px)
+ * @default null
*/
- decimals?: number;
+ height?: number | null;
+ /**
+ * Mode
+ * @description Interpolation mode
+ * @default bilinear
+ * @enum {string}
+ */
+ mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact";
+ /**
+ * Antialias
+ * @description Whether or not to apply antialiasing (bilinear or bicubic only)
+ * @default false
+ */
+ antialias?: boolean;
/**
* type
- * @default rand_float
+ * @default lresize
* @constant
*/
- type: "rand_float";
+ type: "lresize";
};
/**
- * Random Integer
- * @description Outputs a single random integer.
+ * ResourceOrigin
+ * @description The origin of a resource (eg image).
+ *
+ * - INTERNAL: The resource was created by the application.
+ * - EXTERNAL: The resource was not created by the application.
+ * This may be a user-initiated upload, or an internal application upload (eg Canvas init image).
+ * @enum {string}
*/
- RandomIntInvocation: {
+ ResourceOrigin: "internal" | "external";
+ /** RetryItemsResult */
+ RetryItemsResult: {
+ /**
+ * Queue Id
+ * @description The ID of the queue
+ */
+ queue_id: string;
+ /**
+ * Retried Item Ids
+ * @description The IDs of the queue items that were retried
+ */
+ retried_item_ids: number[];
+ };
+ /**
+ * Round Float
+ * @description Rounds a float to a specified number of decimal places.
+ */
+ RoundInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20524,33 +19904,96 @@ export type components = {
/**
* Use Cache
* @description Whether or not to use the cache
- * @default false
+ * @default true
*/
use_cache?: boolean;
/**
- * Low
- * @description The inclusive low value
+ * Value
+ * @description The float value
* @default 0
*/
- low?: number;
+ value?: number;
/**
- * High
- * @description The exclusive high value
- * @default 2147483647
+ * Decimals
+ * @description The number of decimal places
+ * @default 0
*/
- high?: number;
+ decimals?: number;
/**
* type
- * @default rand_int
+ * @default round_float
* @constant
*/
- type: "rand_int";
+ type: "round_float";
+ };
+ /** SAMPoint */
+ SAMPoint: {
+ /**
+ * X
+ * @description The x-coordinate of the point
+ */
+ x: number;
+ /**
+ * Y
+ * @description The y-coordinate of the point
+ */
+ y: number;
+ /** @description The label of the point */
+ label: components["schemas"]["SAMPointLabel"];
};
/**
- * Random Range
- * @description Creates a collection of random numbers
+ * SAMPointLabel
+ * @enum {integer}
*/
- RandomRangeInvocation: {
+ SAMPointLabel: -1 | 0 | 1;
+ /** SAMPointsField */
+ SAMPointsField: {
+ /**
+ * Points
+ * @description The points of the object
+ */
+ points: components["schemas"]["SAMPoint"][];
+ };
+ /**
+ * SD3ConditioningField
+ * @description A conditioning tensor primitive value
+ */
+ SD3ConditioningField: {
+ /**
+ * Conditioning Name
+ * @description The name of conditioning tensor
+ */
+ conditioning_name: string;
+ };
+ /**
+ * SD3ConditioningOutput
+ * @description Base class for nodes that output a single SD3 conditioning tensor
+ */
+ SD3ConditioningOutput: {
+ /** @description Conditioning tensor */
+ conditioning: components["schemas"]["SD3ConditioningField"];
+ /**
+ * type
+ * @default sd3_conditioning_output
+ * @constant
+ */
+ type: "sd3_conditioning_output";
+ };
+ /**
+ * Denoise - SD3
+ * @description Run denoising process with a SD3 model.
+ */
+ SD3DenoiseInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20565,45 +20008,99 @@ export type components = {
/**
* Use Cache
* @description Whether or not to use the cache
- * @default false
+ * @default true
*/
use_cache?: boolean;
/**
- * Low
- * @description The inclusive low value
- * @default 0
+ * @description Latents tensor
+ * @default null
*/
- low?: number;
+ latents?: components["schemas"]["LatentsField"] | null;
/**
- * High
- * @description The exclusive high value
- * @default 2147483647
+ * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.
+ * @default null
*/
- high?: number;
+ denoise_mask?: components["schemas"]["DenoiseMaskField"] | null;
/**
- * Size
- * @description The number of values to generate
+ * Denoising Start
+ * @description When to start denoising, expressed a percentage of total steps
+ * @default 0
+ */
+ denoising_start?: number;
+ /**
+ * Denoising End
+ * @description When to stop denoising, expressed a percentage of total steps
* @default 1
*/
- size?: number;
+ denoising_end?: number;
+ /**
+ * Transformer
+ * @description SD3 model (MMDiTX) to load
+ * @default null
+ */
+ transformer?: components["schemas"]["TransformerField"] | null;
+ /**
+ * @description Positive conditioning tensor
+ * @default null
+ */
+ positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null;
+ /**
+ * @description Negative conditioning tensor
+ * @default null
+ */
+ negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null;
+ /**
+ * CFG Scale
+ * @description Classifier-Free Guidance scale
+ * @default 3.5
+ */
+ cfg_scale?: number | number[];
+ /**
+ * Width
+ * @description Width of the generated image.
+ * @default 1024
+ */
+ width?: number;
+ /**
+ * Height
+ * @description Height of the generated image.
+ * @default 1024
+ */
+ height?: number;
+ /**
+ * Steps
+ * @description Number of steps to run
+ * @default 10
+ */
+ steps?: number;
/**
* Seed
- * @description The seed for the RNG (omit for random)
+ * @description Randomness seed for reproducibility.
* @default 0
*/
seed?: number;
/**
* type
- * @default random_range
+ * @default sd3_denoise
* @constant
*/
- type: "random_range";
+ type: "sd3_denoise";
};
/**
- * Integer Range
- * @description Creates a range of numbers from start to stop with step
+ * Image to Latents - SD3
+ * @description Generates latents from an image.
*/
- RangeInvocation: {
+ SD3ImageToLatentsInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20622,35 +20119,37 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Start
- * @description The start of the range
- * @default 0
- */
- start?: number;
- /**
- * Stop
- * @description The stop of the range
- * @default 10
+ * @description The image to encode
+ * @default null
*/
- stop?: number;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * Step
- * @description The step of the range
- * @default 1
+ * @description VAE
+ * @default null
*/
- step?: number;
+ vae?: components["schemas"]["VAEField"] | null;
/**
* type
- * @default range
+ * @default sd3_i2l
* @constant
*/
- type: "range";
+ type: "sd3_i2l";
};
/**
- * Integer Range of Size
- * @description Creates a range from start to start + (size * step) incremented by step
+ * Latents to Image - SD3
+ * @description Generates an image from latents.
*/
- RangeOfSizeInvocation: {
+ SD3LatentsToImageInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20669,40 +20168,27 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Start
- * @description The start of the range
- * @default 0
- */
- start?: number;
- /**
- * Size
- * @description The number of values
- * @default 1
+ * @description Latents tensor
+ * @default null
*/
- size?: number;
+ latents?: components["schemas"]["LatentsField"] | null;
/**
- * Step
- * @description The step of the range
- * @default 1
+ * @description VAE
+ * @default null
*/
- step?: number;
+ vae?: components["schemas"]["VAEField"] | null;
/**
* type
- * @default range_of_size
+ * @default sd3_l2i
* @constant
*/
- type: "range_of_size";
+ type: "sd3_l2i";
};
/**
- * Create Rectangle Mask
- * @description Create a rectangular mask.
+ * Prompt - SDXL
+ * @description Parse prompt using compel package to conditioning.
*/
- RectangleMaskInvocation: {
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ SDXLCompelPromptInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20721,108 +20207,129 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Width
- * @description The width of the entire mask.
- * @default null
+ * Prompt
+ * @description Prompt to be parsed by Compel to create a conditioning tensor
+ * @default
*/
- width?: number | null;
+ prompt?: string;
/**
- * Height
- * @description The height of the entire mask.
- * @default null
+ * Style
+ * @description Prompt to be parsed by Compel to create a conditioning tensor
+ * @default
*/
- height?: number | null;
+ style?: string;
/**
- * X Left
- * @description The left x-coordinate of the rectangular masked region (inclusive).
- * @default null
+ * Original Width
+ * @default 1024
*/
- x_left?: number | null;
+ original_width?: number;
/**
- * Y Top
- * @description The top y-coordinate of the rectangular masked region (inclusive).
+ * Original Height
+ * @default 1024
+ */
+ original_height?: number;
+ /**
+ * Crop Top
+ * @default 0
+ */
+ crop_top?: number;
+ /**
+ * Crop Left
+ * @default 0
+ */
+ crop_left?: number;
+ /**
+ * Target Width
+ * @default 1024
+ */
+ target_width?: number;
+ /**
+ * Target Height
+ * @default 1024
+ */
+ target_height?: number;
+ /**
+ * CLIP 1
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- y_top?: number | null;
+ clip?: components["schemas"]["CLIPField"] | null;
/**
- * Rectangle Width
- * @description The width of the rectangular masked region.
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- rectangle_width?: number | null;
+ clip2?: components["schemas"]["CLIPField"] | null;
/**
- * Rectangle Height
- * @description The height of the rectangular masked region.
+ * @description A mask defining the region that this conditioning prompt applies to.
* @default null
*/
- rectangle_height?: number | null;
+ mask?: components["schemas"]["TensorField"] | null;
/**
* type
- * @default rectangle_mask
+ * @default sdxl_compel_prompt
* @constant
*/
- type: "rectangle_mask";
+ type: "sdxl_compel_prompt";
};
/**
- * RemoteModelFile
- * @description Information about a downloadable file that forms part of a model.
+ * Apply LoRA Collection - SDXL
+ * @description Applies a collection of SDXL LoRAs to the provided UNet and CLIP models.
*/
- RemoteModelFile: {
+ SDXLLoRACollectionLoader: {
/**
- * Url
- * Format: uri
- * @description The url to download this model file
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- url: string;
+ id: string;
/**
- * Path
- * Format: path
- * @description The path to the file, relative to the model root
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- path: string;
+ is_intermediate?: boolean;
/**
- * Size
- * @description The size of this file, in bytes
- * @default 0
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- size?: number | null;
+ use_cache?: boolean;
/**
- * Sha256
- * @description SHA256 hash of this model (not always available)
+ * LoRAs
+ * @description LoRA models and weights. May be a single LoRA or collection.
+ * @default null
*/
- sha256?: string | null;
- };
- /** RemoveImagesFromBoardResult */
- RemoveImagesFromBoardResult: {
+ loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null;
/**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
+ * UNet
+ * @description UNet (scheduler, LoRAs)
+ * @default null
*/
- affected_boards: string[];
+ unet?: components["schemas"]["UNetField"] | null;
/**
- * Removed Images
- * @description The image names that were removed from their board
+ * CLIP
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @default null
*/
- removed_images: string[];
- };
- /** RemoveVideosFromBoardResult */
- RemoveVideosFromBoardResult: {
+ clip?: components["schemas"]["CLIPField"] | null;
/**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @default null
*/
- affected_boards: string[];
+ clip2?: components["schemas"]["CLIPField"] | null;
/**
- * Removed Videos
- * @description The video ids that were removed from their board
+ * type
+ * @default sdxl_lora_collection_loader
+ * @constant
*/
- removed_videos: string[];
+ type: "sdxl_lora_collection_loader";
};
/**
- * Resize Latents
- * @description Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8.
+ * Apply LoRA - SDXL
+ * @description Apply selected lora to unet and text_encoder.
*/
- ResizeLatentsInvocation: {
+ SDXLLoRALoaderInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20841,70 +20348,77 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description Latents tensor
+ * LoRA
+ * @description LoRA model to load
* @default null
*/
- latents?: components["schemas"]["LatentsField"] | null;
+ lora?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * Width
- * @description Width of output (px)
- * @default null
+ * Weight
+ * @description The weight at which the LoRA is applied to each model
+ * @default 0.75
*/
- width?: number | null;
+ weight?: number;
/**
- * Height
- * @description Width of output (px)
+ * UNet
+ * @description UNet (scheduler, LoRAs)
* @default null
*/
- height?: number | null;
+ unet?: components["schemas"]["UNetField"] | null;
/**
- * Mode
- * @description Interpolation mode
- * @default bilinear
- * @enum {string}
+ * CLIP 1
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @default null
*/
- mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact";
+ clip?: components["schemas"]["CLIPField"] | null;
/**
- * Antialias
- * @description Whether or not to apply antialiasing (bilinear or bicubic only)
- * @default false
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @default null
*/
- antialias?: boolean;
+ clip2?: components["schemas"]["CLIPField"] | null;
/**
* type
- * @default lresize
+ * @default sdxl_lora_loader
* @constant
*/
- type: "lresize";
+ type: "sdxl_lora_loader";
};
/**
- * ResourceOrigin
- * @description The origin of a resource (eg image).
- *
- * - INTERNAL: The resource was created by the application.
- * - EXTERNAL: The resource was not created by the application.
- * This may be a user-initiated upload, or an internal application upload (eg Canvas init image).
- * @enum {string}
+ * SDXLLoRALoaderOutput
+ * @description SDXL LoRA Loader Output
*/
- ResourceOrigin: "internal" | "external";
- /** RetryItemsResult */
- RetryItemsResult: {
+ SDXLLoRALoaderOutput: {
/**
- * Queue Id
- * @description The ID of the queue
+ * UNet
+ * @description UNet (scheduler, LoRAs)
+ * @default null
*/
- queue_id: string;
+ unet: components["schemas"]["UNetField"] | null;
/**
- * Retried Item Ids
- * @description The IDs of the queue items that were retried
+ * CLIP 1
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @default null
*/
- retried_item_ids: number[];
+ clip: components["schemas"]["CLIPField"] | null;
+ /**
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @default null
+ */
+ clip2: components["schemas"]["CLIPField"] | null;
+ /**
+ * type
+ * @default sdxl_lora_loader_output
+ * @constant
+ */
+ type: "sdxl_lora_loader_output";
};
/**
- * Round Float
- * @description Rounds a float to a specified number of decimal places.
+ * Main Model - SDXL
+ * @description Loads an sdxl base model, outputting its submodels.
*/
- RoundInvocation: {
+ SDXLModelLoaderInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -20923,92 +20437,54 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Value
- * @description The float value
- * @default 0
- */
- value?: number;
- /**
- * Decimals
- * @description The number of decimal places
- * @default 0
+ * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load
+ * @default null
*/
- decimals?: number;
+ model?: components["schemas"]["ModelIdentifierField"] | null;
/**
* type
- * @default round_float
+ * @default sdxl_model_loader
* @constant
*/
- type: "round_float";
+ type: "sdxl_model_loader";
};
- /** SAMPoint */
- SAMPoint: {
+ /**
+ * SDXLModelLoaderOutput
+ * @description SDXL base model loader output
+ */
+ SDXLModelLoaderOutput: {
/**
- * X
- * @description The x-coordinate of the point
+ * UNet
+ * @description UNet (scheduler, LoRAs)
*/
- x: number;
+ unet: components["schemas"]["UNetField"];
/**
- * Y
- * @description The y-coordinate of the point
+ * CLIP 1
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
*/
- y: number;
- /** @description The label of the point */
- label: components["schemas"]["SAMPointLabel"];
- };
- /**
- * SAMPointLabel
- * @enum {integer}
- */
- SAMPointLabel: -1 | 0 | 1;
- /** SAMPointsField */
- SAMPointsField: {
+ clip: components["schemas"]["CLIPField"];
/**
- * Points
- * @description The points of the object
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
*/
- points: components["schemas"]["SAMPoint"][];
- };
- /**
- * SD3ConditioningField
- * @description A conditioning tensor primitive value
- */
- SD3ConditioningField: {
+ clip2: components["schemas"]["CLIPField"];
/**
- * Conditioning Name
- * @description The name of conditioning tensor
+ * VAE
+ * @description VAE
*/
- conditioning_name: string;
- };
- /**
- * SD3ConditioningOutput
- * @description Base class for nodes that output a single SD3 conditioning tensor
- */
- SD3ConditioningOutput: {
- /** @description Conditioning tensor */
- conditioning: components["schemas"]["SD3ConditioningField"];
+ vae: components["schemas"]["VAEField"];
/**
* type
- * @default sd3_conditioning_output
+ * @default sdxl_model_loader_output
* @constant
*/
- type: "sd3_conditioning_output";
+ type: "sdxl_model_loader_output";
};
/**
- * Denoise - SD3
- * @description Run denoising process with a SD3 model.
+ * Prompt - SDXL Refiner
+ * @description Parse prompt using compel package to conditioning.
*/
- SD3DenoiseInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ SDXLRefinerCompelPromptInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21027,85 +20503,120 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description Latents tensor
- * @default null
+ * Style
+ * @description Prompt to be parsed by Compel to create a conditioning tensor
+ * @default
*/
- latents?: components["schemas"]["LatentsField"] | null;
+ style?: string;
/**
- * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.
- * @default null
+ * Original Width
+ * @default 1024
*/
- denoise_mask?: components["schemas"]["DenoiseMaskField"] | null;
+ original_width?: number;
/**
- * Denoising Start
- * @description When to start denoising, expressed a percentage of total steps
+ * Original Height
+ * @default 1024
+ */
+ original_height?: number;
+ /**
+ * Crop Top
* @default 0
*/
- denoising_start?: number;
+ crop_top?: number;
/**
- * Denoising End
- * @description When to stop denoising, expressed a percentage of total steps
- * @default 1
+ * Crop Left
+ * @default 0
*/
- denoising_end?: number;
+ crop_left?: number;
/**
- * Transformer
- * @description SD3 model (MMDiTX) to load
- * @default null
+ * Aesthetic Score
+ * @description The aesthetic score to apply to the conditioning tensor
+ * @default 6
*/
- transformer?: components["schemas"]["TransformerField"] | null;
+ aesthetic_score?: number;
/**
- * @description Positive conditioning tensor
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null;
+ clip2?: components["schemas"]["CLIPField"] | null;
/**
- * @description Negative conditioning tensor
- * @default null
+ * type
+ * @default sdxl_refiner_compel_prompt
+ * @constant
*/
- negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null;
+ type: "sdxl_refiner_compel_prompt";
+ };
+ /**
+ * Refiner Model - SDXL
+ * @description Loads an sdxl refiner model, outputting its submodels.
+ */
+ SDXLRefinerModelLoaderInvocation: {
/**
- * CFG Scale
- * @description Classifier-Free Guidance scale
- * @default 3.5
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- cfg_scale?: number | number[];
+ id: string;
/**
- * Width
- * @description Width of the generated image.
- * @default 1024
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- width?: number;
+ is_intermediate?: boolean;
/**
- * Height
- * @description Height of the generated image.
- * @default 1024
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- height?: number;
+ use_cache?: boolean;
/**
- * Steps
- * @description Number of steps to run
- * @default 10
+ * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load
+ * @default null
*/
- steps?: number;
+ model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * Seed
- * @description Randomness seed for reproducibility.
- * @default 0
+ * type
+ * @default sdxl_refiner_model_loader
+ * @constant
*/
- seed?: number;
+ type: "sdxl_refiner_model_loader";
+ };
+ /**
+ * SDXLRefinerModelLoaderOutput
+ * @description SDXL refiner model loader output
+ */
+ SDXLRefinerModelLoaderOutput: {
+ /**
+ * UNet
+ * @description UNet (scheduler, LoRAs)
+ */
+ unet: components["schemas"]["UNetField"];
+ /**
+ * CLIP 2
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ */
+ clip2: components["schemas"]["CLIPField"];
+ /**
+ * VAE
+ * @description VAE
+ */
+ vae: components["schemas"]["VAEField"];
/**
* type
- * @default sd3_denoise
+ * @default sdxl_refiner_model_loader_output
* @constant
*/
- type: "sd3_denoise";
+ type: "sdxl_refiner_model_loader_output";
};
/**
- * Image to Latents - SD3
- * @description Generates latents from an image.
+ * SQLiteDirection
+ * @enum {string}
*/
- SD3ImageToLatentsInvocation: {
+ SQLiteDirection: "ASC" | "DESC";
+ /**
+ * Save Image
+ * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image.
+ */
+ SaveImageInvocation: {
/**
* @description The board to save the image to
* @default null
@@ -21130,41 +20641,26 @@ export type components = {
/**
* Use Cache
* @description Whether or not to use the cache
- * @default true
+ * @default false
*/
use_cache?: boolean;
/**
- * @description The image to encode
+ * @description The image to process
* @default null
*/
image?: components["schemas"]["ImageField"] | null;
- /**
- * @description VAE
- * @default null
- */
- vae?: components["schemas"]["VAEField"] | null;
/**
* type
- * @default sd3_i2l
+ * @default save_image
* @constant
*/
- type: "sd3_i2l";
+ type: "save_image";
};
/**
- * Latents to Image - SD3
- * @description Generates an image from latents.
+ * Scale Latents
+ * @description Scales latents by a given factor.
*/
- SD3LatentsToImageInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ ScaleLatentsInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21188,22 +20684,36 @@ export type components = {
*/
latents?: components["schemas"]["LatentsField"] | null;
/**
- * @description VAE
+ * Scale Factor
+ * @description The factor by which to scale
* @default null
*/
- vae?: components["schemas"]["VAEField"] | null;
+ scale_factor?: number | null;
+ /**
+ * Mode
+ * @description Interpolation mode
+ * @default bilinear
+ * @enum {string}
+ */
+ mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact";
+ /**
+ * Antialias
+ * @description Whether or not to apply antialiasing (bilinear or bicubic only)
+ * @default false
+ */
+ antialias?: boolean;
/**
* type
- * @default sd3_l2i
+ * @default lscale
* @constant
*/
- type: "sd3_l2i";
+ type: "lscale";
};
/**
- * Prompt - SDXL
- * @description Parse prompt using compel package to conditioning.
+ * Scheduler
+ * @description Selects a scheduler.
*/
- SDXLCompelPromptInvocation: {
+ SchedulerInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21222,76 +20732,137 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Prompt
- * @description Prompt to be parsed by Compel to create a conditioning tensor
- * @default
+ * Scheduler
+ * @description Scheduler to use during inference
+ * @default euler
+ * @enum {string}
*/
- prompt?: string;
+ scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
/**
- * Style
- * @description Prompt to be parsed by Compel to create a conditioning tensor
- * @default
+ * type
+ * @default scheduler
+ * @constant
*/
- style?: string;
- /**
- * Original Width
- * @default 1024
+ type: "scheduler";
+ };
+ /** SchedulerOutput */
+ SchedulerOutput: {
+ /**
+ * Scheduler
+ * @description Scheduler to use during inference
+ * @enum {string}
*/
- original_width?: number;
+ scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
/**
- * Original Height
- * @default 1024
+ * type
+ * @default scheduler_output
+ * @constant
*/
- original_height?: number;
+ type: "scheduler_output";
+ };
+ /**
+ * SchedulerPredictionType
+ * @description Scheduler prediction type.
+ * @enum {string}
+ */
+ SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
+ /**
+ * Main Model - SD3
+ * @description Loads a SD3 base model, outputting its submodels.
+ */
+ Sd3ModelLoaderInvocation: {
/**
- * Crop Top
- * @default 0
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- crop_top?: number;
+ id: string;
/**
- * Crop Left
- * @default 0
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- crop_left?: number;
+ is_intermediate?: boolean;
/**
- * Target Width
- * @default 1024
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- target_width?: number;
+ use_cache?: boolean;
+ /** @description SD3 model (MMDiTX) to load */
+ model: components["schemas"]["ModelIdentifierField"];
/**
- * Target Height
- * @default 1024
+ * T5 Encoder
+ * @description T5 tokenizer and text encoder
+ * @default null
*/
- target_height?: number;
+ t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * CLIP 1
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * CLIP L Encoder
+ * @description CLIP Embed loader
* @default null
*/
- clip?: components["schemas"]["CLIPField"] | null;
+ clip_l_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * CLIP G Encoder
+ * @description CLIP-G Embed loader
* @default null
*/
- clip2?: components["schemas"]["CLIPField"] | null;
+ clip_g_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * @description A mask defining the region that this conditioning prompt applies to.
+ * VAE
+ * @description VAE model to load
* @default null
*/
- mask?: components["schemas"]["TensorField"] | null;
+ vae_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
* type
- * @default sdxl_compel_prompt
+ * @default sd3_model_loader
* @constant
*/
- type: "sdxl_compel_prompt";
+ type: "sd3_model_loader";
};
/**
- * Apply LoRA Collection - SDXL
- * @description Applies a collection of SDXL LoRAs to the provided UNet and CLIP models.
+ * Sd3ModelLoaderOutput
+ * @description SD3 base model loader output.
*/
- SDXLLoRACollectionLoader: {
+ Sd3ModelLoaderOutput: {
+ /**
+ * Transformer
+ * @description Transformer
+ */
+ transformer: components["schemas"]["TransformerField"];
+ /**
+ * CLIP L
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ */
+ clip_l: components["schemas"]["CLIPField"];
+ /**
+ * CLIP G
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ */
+ clip_g: components["schemas"]["CLIPField"];
+ /**
+ * T5 Encoder
+ * @description T5 tokenizer and text encoder
+ */
+ t5_encoder: components["schemas"]["T5EncoderField"];
+ /**
+ * VAE
+ * @description VAE
+ */
+ vae: components["schemas"]["VAEField"];
+ /**
+ * type
+ * @default sd3_model_loader_output
+ * @constant
+ */
+ type: "sd3_model_loader_output";
+ };
+ /**
+ * Prompt - SD3
+ * @description Encodes and preps a prompt for a SD3 image.
+ */
+ Sd3TextEncoderInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21310,41 +20881,41 @@ export type components = {
*/
use_cache?: boolean;
/**
- * LoRAs
- * @description LoRA models and weights. May be a single LoRA or collection.
+ * CLIP L
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null;
+ clip_l?: components["schemas"]["CLIPField"] | null;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * CLIP G
+ * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
* @default null
*/
- unet?: components["schemas"]["UNetField"] | null;
+ clip_g?: components["schemas"]["CLIPField"] | null;
/**
- * CLIP
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * T5Encoder
+ * @description T5 tokenizer and text encoder
* @default null
*/
- clip?: components["schemas"]["CLIPField"] | null;
+ t5_encoder?: components["schemas"]["T5EncoderField"] | null;
/**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Prompt
+ * @description Text prompt to encode.
* @default null
*/
- clip2?: components["schemas"]["CLIPField"] | null;
+ prompt?: string | null;
/**
* type
- * @default sdxl_lora_collection_loader
+ * @default sd3_text_encoder
* @constant
*/
- type: "sdxl_lora_collection_loader";
+ type: "sd3_text_encoder";
};
/**
- * Apply LoRA - SDXL
- * @description Apply selected lora to unet and text_encoder.
+ * Apply Seamless - SD1.5, SDXL
+ * @description Applies the seamless transformation to the Model UNet and VAE.
*/
- SDXLLoRALoaderInvocation: {
+ SeamlessModeInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21362,18 +20933,6 @@ export type components = {
* @default true
*/
use_cache?: boolean;
- /**
- * LoRA
- * @description LoRA model to load
- * @default null
- */
- lora?: components["schemas"]["ModelIdentifierField"] | null;
- /**
- * Weight
- * @description The weight at which the LoRA is applied to each model
- * @default 0.75
- */
- weight?: number;
/**
* UNet
* @description UNet (scheduler, LoRAs)
@@ -21381,29 +20940,35 @@ export type components = {
*/
unet?: components["schemas"]["UNetField"] | null;
/**
- * CLIP 1
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * VAE
+ * @description VAE model to load
* @default null
*/
- clip?: components["schemas"]["CLIPField"] | null;
+ vae?: components["schemas"]["VAEField"] | null;
/**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
- * @default null
+ * Seamless Y
+ * @description Specify whether Y axis is seamless
+ * @default true
*/
- clip2?: components["schemas"]["CLIPField"] | null;
+ seamless_y?: boolean;
+ /**
+ * Seamless X
+ * @description Specify whether X axis is seamless
+ * @default true
+ */
+ seamless_x?: boolean;
/**
* type
- * @default sdxl_lora_loader
+ * @default seamless
* @constant
*/
- type: "sdxl_lora_loader";
+ type: "seamless";
};
/**
- * SDXLLoRALoaderOutput
- * @description SDXL LoRA Loader Output
+ * SeamlessModeOutput
+ * @description Modified Seamless Model output
*/
- SDXLLoRALoaderOutput: {
+ SeamlessModeOutput: {
/**
* UNet
* @description UNet (scheduler, LoRAs)
@@ -21411,29 +20976,23 @@ export type components = {
*/
unet: components["schemas"]["UNetField"] | null;
/**
- * CLIP 1
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
- * @default null
- */
- clip: components["schemas"]["CLIPField"] | null;
- /**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * VAE
+ * @description VAE
* @default null
*/
- clip2: components["schemas"]["CLIPField"] | null;
+ vae: components["schemas"]["VAEField"] | null;
/**
* type
- * @default sdxl_lora_loader_output
+ * @default seamless_output
* @constant
*/
- type: "sdxl_lora_loader_output";
+ type: "seamless_output";
};
/**
- * Main Model - SDXL
- * @description Loads an sdxl base model, outputting its submodels.
+ * Segment Anything
+ * @description Runs a Segment Anything Model (SAM or SAM2).
*/
- SDXLModelLoaderInvocation: {
+ SegmentAnythingInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21452,283 +21011,268 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load
+ * Model
+ * @description The Segment Anything model to use (SAM or SAM2).
* @default null
*/
- model?: components["schemas"]["ModelIdentifierField"] | null;
+ model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge" | "segment-anything-2-tiny" | "segment-anything-2-small" | "segment-anything-2-base" | "segment-anything-2-large") | null;
/**
- * type
- * @default sdxl_model_loader
- * @constant
+ * @description The image to segment.
+ * @default null
*/
- type: "sdxl_model_loader";
- };
- /**
- * SDXLModelLoaderOutput
- * @description SDXL base model loader output
- */
- SDXLModelLoaderOutput: {
+ image?: components["schemas"]["ImageField"] | null;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * Bounding Boxes
+ * @description The bounding boxes to prompt the model with.
+ * @default null
*/
- unet: components["schemas"]["UNetField"];
+ bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null;
/**
- * CLIP 1
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Point Lists
+ * @description The list of point lists to prompt the model with. Each list of points represents a single object.
+ * @default null
*/
- clip: components["schemas"]["CLIPField"];
+ point_lists?: components["schemas"]["SAMPointsField"][] | null;
/**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
- */
- clip2: components["schemas"]["CLIPField"];
+ * Apply Polygon Refinement
+ * @description Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging).
+ * @default true
+ */
+ apply_polygon_refinement?: boolean;
/**
- * VAE
- * @description VAE
+ * Mask Filter
+ * @description The filtering to apply to the detected masks before merging them into a final output.
+ * @default all
+ * @enum {string}
*/
- vae: components["schemas"]["VAEField"];
+ mask_filter?: "all" | "largest" | "highest_box_score";
/**
* type
- * @default sdxl_model_loader_output
+ * @default segment_anything
* @constant
*/
- type: "sdxl_model_loader_output";
+ type: "segment_anything";
};
- /**
- * Prompt - SDXL Refiner
- * @description Parse prompt using compel package to conditioning.
- */
- SDXLRefinerCompelPromptInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
+ /** SessionProcessorStatus */
+ SessionProcessorStatus: {
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Is Started
+ * @description Whether the session processor is started
*/
- is_intermediate?: boolean;
+ is_started: boolean;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Is Processing
+ * @description Whether a session is being processed
*/
- use_cache?: boolean;
+ is_processing: boolean;
+ };
+ /**
+ * SessionQueueAndProcessorStatus
+ * @description The overall status of session queue and processor
+ */
+ SessionQueueAndProcessorStatus: {
+ queue: components["schemas"]["SessionQueueStatus"];
+ processor: components["schemas"]["SessionProcessorStatus"];
+ };
+ /** SessionQueueCountsByDestination */
+ SessionQueueCountsByDestination: {
/**
- * Style
- * @description Prompt to be parsed by Compel to create a conditioning tensor
- * @default
+ * Queue Id
+ * @description The ID of the queue
*/
- style?: string;
+ queue_id: string;
/**
- * Original Width
- * @default 1024
+ * Destination
+ * @description The destination of queue items included in this status
*/
- original_width?: number;
+ destination: string;
/**
- * Original Height
- * @default 1024
+ * Pending
+ * @description Number of queue items with status 'pending' for the destination
*/
- original_height?: number;
+ pending: number;
/**
- * Crop Top
- * @default 0
+ * In Progress
+ * @description Number of queue items with status 'in_progress' for the destination
*/
- crop_top?: number;
+ in_progress: number;
/**
- * Crop Left
- * @default 0
+ * Completed
+ * @description Number of queue items with status 'complete' for the destination
*/
- crop_left?: number;
+ completed: number;
/**
- * Aesthetic Score
- * @description The aesthetic score to apply to the conditioning tensor
- * @default 6
+ * Failed
+ * @description Number of queue items with status 'error' for the destination
*/
- aesthetic_score?: number;
+ failed: number;
/**
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
- * @default null
+ * Canceled
+ * @description Number of queue items with status 'canceled' for the destination
*/
- clip2?: components["schemas"]["CLIPField"] | null;
+ canceled: number;
/**
- * type
- * @default sdxl_refiner_compel_prompt
- * @constant
+ * Total
+ * @description Total number of queue items for the destination
*/
- type: "sdxl_refiner_compel_prompt";
+ total: number;
};
/**
- * Refiner Model - SDXL
- * @description Loads an sdxl refiner model, outputting its submodels.
+ * SessionQueueItem
+ * @description Session queue item without the full graph. Used for serialization.
*/
- SDXLRefinerModelLoaderInvocation: {
+ SessionQueueItem: {
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Item Id
+ * @description The identifier of the session queue item
*/
- id: string;
+ item_id: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Status
+ * @description The status of this queue item
+ * @default pending
+ * @enum {string}
*/
- is_intermediate?: boolean;
+ status: "pending" | "in_progress" | "completed" | "failed" | "canceled";
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Priority
+ * @description The priority of this queue item
+ * @default 0
*/
- use_cache?: boolean;
+ priority: number;
/**
- * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load
- * @default null
+ * Batch Id
+ * @description The ID of the batch associated with this queue item
*/
- model?: components["schemas"]["ModelIdentifierField"] | null;
+ batch_id: string;
/**
- * type
- * @default sdxl_refiner_model_loader
- * @constant
+ * Origin
+ * @description The origin of this queue item. This data is used by the frontend to determine how to handle results.
*/
- type: "sdxl_refiner_model_loader";
- };
- /**
- * SDXLRefinerModelLoaderOutput
- * @description SDXL refiner model loader output
- */
- SDXLRefinerModelLoaderOutput: {
+ origin?: string | null;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * Destination
+ * @description The origin of this queue item. This data is used by the frontend to determine how to handle results
*/
- unet: components["schemas"]["UNetField"];
+ destination?: string | null;
/**
- * CLIP 2
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Session Id
+ * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed.
*/
- clip2: components["schemas"]["CLIPField"];
+ session_id: string;
/**
- * VAE
- * @description VAE
+ * Error Type
+ * @description The error type if this queue item errored
*/
- vae: components["schemas"]["VAEField"];
+ error_type?: string | null;
/**
- * type
- * @default sdxl_refiner_model_loader_output
- * @constant
+ * Error Message
+ * @description The error message if this queue item errored
*/
- type: "sdxl_refiner_model_loader_output";
- };
- /**
- * SQLiteDirection
- * @enum {string}
- */
- SQLiteDirection: "ASC" | "DESC";
- /**
- * Save Image
- * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image.
- */
- SaveImageInvocation: {
+ error_message?: string | null;
/**
- * @description The board to save the image to
- * @default null
+ * Error Traceback
+ * @description The error traceback if this queue item errored
*/
- board?: components["schemas"]["BoardField"] | null;
+ error_traceback?: string | null;
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Created At
+ * @description When this queue item was created
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ created_at: string;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Updated At
+ * @description When this queue item was updated
*/
- id: string;
+ updated_at: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Started At
+ * @description When this queue item was started
*/
- is_intermediate?: boolean;
+ started_at?: string | null;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default false
+ * Completed At
+ * @description When this queue item was completed
*/
- use_cache?: boolean;
+ completed_at?: string | null;
/**
- * @description The image to process
- * @default null
+ * Queue Id
+ * @description The id of the queue with which this item is associated
*/
- image?: components["schemas"]["ImageField"] | null;
+ queue_id: string;
/**
- * type
- * @default save_image
- * @constant
+ * Field Values
+ * @description The field values that were used for this queue item
*/
- type: "save_image";
- };
- /**
- * Scale Latents
- * @description Scales latents by a given factor.
- */
- ScaleLatentsInvocation: {
+ field_values?: components["schemas"]["NodeFieldValue"][] | null;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Retried From Item Id
+ * @description The item_id of the queue item that this item was retried from
*/
- id: string;
+ retried_from_item_id?: number | null;
+ /** @description The fully-populated session to be executed */
+ session: components["schemas"]["GraphExecutionState"];
+ /** @description The workflow associated with this queue item */
+ workflow?: components["schemas"]["WorkflowWithoutID"] | null;
+ };
+ /** SessionQueueStatus */
+ SessionQueueStatus: {
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Queue Id
+ * @description The ID of the queue
*/
- is_intermediate?: boolean;
+ queue_id: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Item Id
+ * @description The current queue item id
*/
- use_cache?: boolean;
+ item_id: number | null;
/**
- * @description Latents tensor
- * @default null
+ * Batch Id
+ * @description The current queue item's batch id
*/
- latents?: components["schemas"]["LatentsField"] | null;
+ batch_id: string | null;
/**
- * Scale Factor
- * @description The factor by which to scale
- * @default null
+ * Session Id
+ * @description The current queue item's session id
*/
- scale_factor?: number | null;
+ session_id: string | null;
/**
- * Mode
- * @description Interpolation mode
- * @default bilinear
- * @enum {string}
+ * Pending
+ * @description Number of queue items with status 'pending'
*/
- mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact";
+ pending: number;
/**
- * Antialias
- * @description Whether or not to apply antialiasing (bilinear or bicubic only)
- * @default false
+ * In Progress
+ * @description Number of queue items with status 'in_progress'
*/
- antialias?: boolean;
+ in_progress: number;
/**
- * type
- * @default lscale
- * @constant
+ * Completed
+ * @description Number of queue items with status 'complete'
*/
- type: "lscale";
+ completed: number;
+ /**
+ * Failed
+ * @description Number of queue items with status 'error'
+ */
+ failed: number;
+ /**
+ * Canceled
+ * @description Number of queue items with status 'canceled'
+ */
+ canceled: number;
+ /**
+ * Total
+ * @description Total number of queue items
+ */
+ total: number;
};
/**
- * Scheduler
- * @description Selects a scheduler.
+ * Show Image
+ * @description Displays a provided image using the OS image viewer, and passes it forward in the pipeline.
*/
- SchedulerInvocation: {
+ ShowImageInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21747,137 +21291,105 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Scheduler
- * @description Scheduler to use during inference
- * @default euler
- * @enum {string}
- */
- scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
- /**
- * type
- * @default scheduler
- * @constant
- */
- type: "scheduler";
- };
- /** SchedulerOutput */
- SchedulerOutput: {
- /**
- * Scheduler
- * @description Scheduler to use during inference
- * @enum {string}
+ * @description The image to show
+ * @default null
*/
- scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
+ image?: components["schemas"]["ImageField"] | null;
/**
* type
- * @default scheduler_output
+ * @default show_image
* @constant
*/
- type: "scheduler_output";
+ type: "show_image";
};
/**
- * SchedulerPredictionType
- * @description Scheduler prediction type.
- * @enum {string}
- */
- SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
- /**
- * Main Model - SD3
- * @description Loads a SD3 base model, outputting its submodels.
+ * SigLIP_Diffusers_Config
+ * @description Model config for SigLIP.
*/
- Sd3ModelLoaderInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
- /**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
- */
- is_intermediate?: boolean;
+ SigLIP_Diffusers_Config: {
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Key
+ * @description A unique key for this model.
*/
- use_cache?: boolean;
- /** @description SD3 model (MMDiTX) to load */
- model: components["schemas"]["ModelIdentifierField"];
+ key: string;
/**
- * T5 Encoder
- * @description T5 tokenizer and text encoder
- * @default null
+ * Hash
+ * @description The hash of the model file(s).
*/
- t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null;
+ hash: string;
/**
- * CLIP L Encoder
- * @description CLIP Embed loader
- * @default null
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- clip_l_model?: components["schemas"]["ModelIdentifierField"] | null;
+ path: string;
/**
- * CLIP G Encoder
- * @description CLIP-G Embed loader
- * @default null
+ * File Size
+ * @description The size of the model in bytes.
*/
- clip_g_model?: components["schemas"]["ModelIdentifierField"] | null;
+ file_size: number;
/**
- * VAE
- * @description VAE model to load
- * @default null
+ * Name
+ * @description Name of the model.
*/
- vae_model?: components["schemas"]["ModelIdentifierField"] | null;
+ name: string;
/**
- * type
- * @default sd3_model_loader
- * @constant
+ * Description
+ * @description Model description
*/
- type: "sd3_model_loader";
- };
- /**
- * Sd3ModelLoaderOutput
- * @description SD3 base model loader output.
- */
- Sd3ModelLoaderOutput: {
+ description: string | null;
/**
- * Transformer
- * @description Transformer
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- transformer: components["schemas"]["TransformerField"];
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * CLIP L
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- clip_l: components["schemas"]["CLIPField"];
+ source_api_response: string | null;
/**
- * CLIP G
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * Cover Image
+ * @description Url for image to preview model
*/
- clip_g: components["schemas"]["CLIPField"];
+ cover_image: string | null;
/**
- * T5 Encoder
- * @description T5 tokenizer and text encoder
+ * Format
+ * @default diffusers
+ * @constant
*/
- t5_encoder: components["schemas"]["T5EncoderField"];
+ format: "diffusers";
+ /** @default */
+ repo_variant: components["schemas"]["ModelRepoVariant"];
/**
- * VAE
- * @description VAE
+ * Type
+ * @default siglip
+ * @constant
*/
- vae: components["schemas"]["VAEField"];
+ type: "siglip";
/**
- * type
- * @default sd3_model_loader_output
+ * Base
+ * @default any
* @constant
*/
- type: "sd3_model_loader_output";
+ base: "any";
};
/**
- * Prompt - SD3
- * @description Encodes and preps a prompt for a SD3 image.
+ * Image-to-Image (Autoscale)
+ * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached.
*/
- Sd3TextEncoderInvocation: {
+ SpandrelImageToImageAutoscaleInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -21896,118 +21408,56 @@ export type components = {
*/
use_cache?: boolean;
/**
- * CLIP L
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
- * @default null
- */
- clip_l?: components["schemas"]["CLIPField"] | null;
- /**
- * CLIP G
- * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
+ * @description The input image
* @default null
*/
- clip_g?: components["schemas"]["CLIPField"] | null;
+ image?: components["schemas"]["ImageField"] | null;
/**
- * T5Encoder
- * @description T5 tokenizer and text encoder
+ * Image-to-Image Model
+ * @description Image-to-Image model
* @default null
*/
- t5_encoder?: components["schemas"]["T5EncoderField"] | null;
+ image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * Prompt
- * @description Text prompt to encode.
- * @default null
+ * Tile Size
+ * @description The tile size for tiled image-to-image. Set to 0 to disable tiling.
+ * @default 512
*/
- prompt?: string | null;
+ tile_size?: number;
/**
* type
- * @default sd3_text_encoder
+ * @default spandrel_image_to_image_autoscale
* @constant
*/
- type: "sd3_text_encoder";
- };
- /**
- * Apply Seamless - SD1.5, SDXL
- * @description Applies the seamless transformation to the Model UNet and VAE.
- */
- SeamlessModeInvocation: {
+ type: "spandrel_image_to_image_autoscale";
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Scale
+ * @description The final scale of the output image. If the model does not upscale the image, this will be ignored.
+ * @default 4
*/
- id: string;
+ scale?: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
+ * Fit To Multiple Of 8
+ * @description If true, the output image will be resized to the nearest multiple of 8 in both dimensions.
* @default false
*/
- is_intermediate?: boolean;
- /**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
- */
- use_cache?: boolean;
- /**
- * UNet
- * @description UNet (scheduler, LoRAs)
- * @default null
- */
- unet?: components["schemas"]["UNetField"] | null;
- /**
- * VAE
- * @description VAE model to load
- * @default null
- */
- vae?: components["schemas"]["VAEField"] | null;
- /**
- * Seamless Y
- * @description Specify whether Y axis is seamless
- * @default true
- */
- seamless_y?: boolean;
- /**
- * Seamless X
- * @description Specify whether X axis is seamless
- * @default true
- */
- seamless_x?: boolean;
- /**
- * type
- * @default seamless
- * @constant
- */
- type: "seamless";
+ fit_to_multiple_of_8?: boolean;
};
/**
- * SeamlessModeOutput
- * @description Modified Seamless Model output
+ * Image-to-Image
+ * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel).
*/
- SeamlessModeOutput: {
+ SpandrelImageToImageInvocation: {
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * @description The board to save the image to
* @default null
*/
- unet: components["schemas"]["UNetField"] | null;
+ board?: components["schemas"]["BoardField"] | null;
/**
- * VAE
- * @description VAE
+ * @description Optional metadata to be saved with the image
* @default null
*/
- vae: components["schemas"]["VAEField"] | null;
- /**
- * type
- * @default seamless_output
- * @constant
- */
- type: "seamless_output";
- };
- /**
- * Segment Anything
- * @description Runs a Segment Anything Model (SAM or SAM2).
- */
- SegmentAnythingInvocation: {
+ metadata?: components["schemas"]["MetadataField"] | null;
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -22026,284 +21476,296 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Model
- * @description The Segment Anything model to use (SAM or SAM2).
- * @default null
- */
- model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge" | "segment-anything-2-tiny" | "segment-anything-2-small" | "segment-anything-2-base" | "segment-anything-2-large") | null;
- /**
- * @description The image to segment.
+ * @description The input image
* @default null
*/
image?: components["schemas"]["ImageField"] | null;
/**
- * Bounding Boxes
- * @description The bounding boxes to prompt the model with.
- * @default null
- */
- bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null;
- /**
- * Point Lists
- * @description The list of point lists to prompt the model with. Each list of points represents a single object.
+ * Image-to-Image Model
+ * @description Image-to-Image model
* @default null
*/
- point_lists?: components["schemas"]["SAMPointsField"][] | null;
- /**
- * Apply Polygon Refinement
- * @description Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging).
- * @default true
- */
- apply_polygon_refinement?: boolean;
+ image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * Mask Filter
- * @description The filtering to apply to the detected masks before merging them into a final output.
- * @default all
- * @enum {string}
+ * Tile Size
+ * @description The tile size for tiled image-to-image. Set to 0 to disable tiling.
+ * @default 512
*/
- mask_filter?: "all" | "largest" | "highest_box_score";
+ tile_size?: number;
/**
* type
- * @default segment_anything
+ * @default spandrel_image_to_image
* @constant
*/
- type: "segment_anything";
- };
- /** SessionProcessorStatus */
- SessionProcessorStatus: {
- /**
- * Is Started
- * @description Whether the session processor is started
- */
- is_started: boolean;
- /**
- * Is Processing
- * @description Whether a session is being processed
- */
- is_processing: boolean;
+ type: "spandrel_image_to_image";
};
/**
- * SessionQueueAndProcessorStatus
- * @description The overall status of session queue and processor
+ * Spandrel_Checkpoint_Config
+ * @description Model config for Spandrel Image to Image models.
*/
- SessionQueueAndProcessorStatus: {
- queue: components["schemas"]["SessionQueueStatus"];
- processor: components["schemas"]["SessionProcessorStatus"];
- };
- /** SessionQueueCountsByDestination */
- SessionQueueCountsByDestination: {
- /**
- * Queue Id
- * @description The ID of the queue
- */
- queue_id: string;
- /**
- * Destination
- * @description The destination of queue items included in this status
- */
- destination: string;
+ Spandrel_Checkpoint_Config: {
/**
- * Pending
- * @description Number of queue items with status 'pending' for the destination
+ * Key
+ * @description A unique key for this model.
*/
- pending: number;
+ key: string;
/**
- * In Progress
- * @description Number of queue items with status 'in_progress' for the destination
+ * Hash
+ * @description The hash of the model file(s).
*/
- in_progress: number;
+ hash: string;
/**
- * Completed
- * @description Number of queue items with status 'complete' for the destination
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- completed: number;
+ path: string;
/**
- * Failed
- * @description Number of queue items with status 'error' for the destination
+ * File Size
+ * @description The size of the model in bytes.
*/
- failed: number;
+ file_size: number;
/**
- * Canceled
- * @description Number of queue items with status 'canceled' for the destination
+ * Name
+ * @description Name of the model.
*/
- canceled: number;
+ name: string;
/**
- * Total
- * @description Total number of queue items for the destination
+ * Description
+ * @description Model description
*/
- total: number;
- };
- /**
- * SessionQueueItem
- * @description Session queue item without the full graph. Used for serialization.
- */
- SessionQueueItem: {
+ description: string | null;
/**
- * Item Id
- * @description The identifier of the session queue item
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- item_id: number;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * Status
- * @description The status of this queue item
- * @default pending
- * @enum {string}
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- status: "pending" | "in_progress" | "completed" | "failed" | "canceled";
+ source_api_response: string | null;
/**
- * Priority
- * @description The priority of this queue item
- * @default 0
+ * Cover Image
+ * @description Url for image to preview model
*/
- priority: number;
+ cover_image: string | null;
/**
- * Batch Id
- * @description The ID of the batch associated with this queue item
+ * Base
+ * @default any
+ * @constant
*/
- batch_id: string;
+ base: "any";
/**
- * Origin
- * @description The origin of this queue item. This data is used by the frontend to determine how to handle results.
+ * Type
+ * @default spandrel_image_to_image
+ * @constant
*/
- origin?: string | null;
+ type: "spandrel_image_to_image";
/**
- * Destination
- * @description The origin of this queue item. This data is used by the frontend to determine how to handle results
+ * Format
+ * @default checkpoint
+ * @constant
*/
- destination?: string | null;
+ format: "checkpoint";
+ };
+ /** StarredImagesResult */
+ StarredImagesResult: {
/**
- * Session Id
- * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed.
+ * Affected Boards
+ * @description The ids of boards affected by the delete operation
*/
- session_id: string;
+ affected_boards: string[];
/**
- * Error Type
- * @description The error type if this queue item errored
+ * Starred Images
+ * @description The names of the images that were starred
*/
- error_type?: string | null;
- /**
- * Error Message
- * @description The error message if this queue item errored
- */
- error_message?: string | null;
+ starred_images: string[];
+ };
+ /** StarterModel */
+ StarterModel: {
+ /** Description */
+ description: string;
+ /** Source */
+ source: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["BaseModelType"];
+ type: components["schemas"]["ModelType"];
+ format?: components["schemas"]["ModelFormat"] | null;
/**
- * Error Traceback
- * @description The error traceback if this queue item errored
+ * Is Installed
+ * @default false
*/
- error_traceback?: string | null;
+ is_installed?: boolean;
/**
- * Created At
- * @description When this queue item was created
+ * Previous Names
+ * @default []
*/
- created_at: string;
+ previous_names?: string[];
+ /** Dependencies */
+ dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null;
+ };
+ /** StarterModelBundle */
+ StarterModelBundle: {
+ /** Name */
+ name: string;
+ /** Models */
+ models: components["schemas"]["StarterModel"][];
+ };
+ /** StarterModelResponse */
+ StarterModelResponse: {
+ /** Starter Models */
+ starter_models: components["schemas"]["StarterModel"][];
+ /** Starter Bundles */
+ starter_bundles: {
+ [key: string]: components["schemas"]["StarterModelBundle"];
+ };
+ };
+ /** StarterModelWithoutDependencies */
+ StarterModelWithoutDependencies: {
+ /** Description */
+ description: string;
+ /** Source */
+ source: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["BaseModelType"];
+ type: components["schemas"]["ModelType"];
+ format?: components["schemas"]["ModelFormat"] | null;
/**
- * Updated At
- * @description When this queue item was updated
+ * Is Installed
+ * @default false
*/
- updated_at: string;
+ is_installed?: boolean;
/**
- * Started At
- * @description When this queue item was started
+ * Previous Names
+ * @default []
*/
- started_at?: string | null;
+ previous_names?: string[];
+ };
+ /**
+ * String2Output
+ * @description Base class for invocations that output two strings
+ */
+ String2Output: {
/**
- * Completed At
- * @description When this queue item was completed
+ * String 1
+ * @description string 1
*/
- completed_at?: string | null;
+ string_1: string;
/**
- * Queue Id
- * @description The id of the queue with which this item is associated
+ * String 2
+ * @description string 2
*/
- queue_id: string;
+ string_2: string;
/**
- * Field Values
- * @description The field values that were used for this queue item
+ * type
+ * @default string_2_output
+ * @constant
*/
- field_values?: components["schemas"]["NodeFieldValue"][] | null;
+ type: "string_2_output";
+ };
+ /**
+ * String Batch
+ * @description Create a batched generation, where the workflow is executed once for each string in the batch.
+ */
+ StringBatchInvocation: {
/**
- * Retried From Item Id
- * @description The item_id of the queue item that this item was retried from
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- retried_from_item_id?: number | null;
+ id: string;
/**
- * Is Api Validation Run
- * @description Whether this queue item is an API validation run.
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
* @default false
*/
- is_api_validation_run?: boolean;
- /**
- * Published Workflow Id
- * @description The ID of the published workflow associated with this queue item
- */
- published_workflow_id?: string | null;
+ is_intermediate?: boolean;
/**
- * Credits
- * @description The total credits used for this queue item
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- credits?: number | null;
- /** @description The fully-populated session to be executed */
- session: components["schemas"]["GraphExecutionState"];
- /** @description The workflow associated with this queue item */
- workflow?: components["schemas"]["WorkflowWithoutID"] | null;
- };
- /** SessionQueueStatus */
- SessionQueueStatus: {
+ use_cache?: boolean;
/**
- * Queue Id
- * @description The ID of the queue
+ * Batch Group
+ * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size.
+ * @default None
+ * @enum {string}
*/
- queue_id: string;
+ batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5";
/**
- * Item Id
- * @description The current queue item id
+ * Strings
+ * @description The strings to batch over
+ * @default null
*/
- item_id: number | null;
+ strings?: string[] | null;
/**
- * Batch Id
- * @description The current queue item's batch id
+ * type
+ * @default string_batch
+ * @constant
*/
- batch_id: string | null;
+ type: "string_batch";
+ };
+ /**
+ * String Collection Primitive
+ * @description A collection of string primitive values
+ */
+ StringCollectionInvocation: {
/**
- * Session Id
- * @description The current queue item's session id
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- session_id: string | null;
+ id: string;
/**
- * Pending
- * @description Number of queue items with status 'pending'
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- pending: number;
+ is_intermediate?: boolean;
/**
- * In Progress
- * @description Number of queue items with status 'in_progress'
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- in_progress: number;
+ use_cache?: boolean;
/**
- * Completed
- * @description Number of queue items with status 'complete'
+ * Collection
+ * @description The collection of string values
+ * @default []
*/
- completed: number;
+ collection?: string[];
/**
- * Failed
- * @description Number of queue items with status 'error'
+ * type
+ * @default string_collection
+ * @constant
*/
- failed: number;
+ type: "string_collection";
+ };
+ /**
+ * StringCollectionOutput
+ * @description Base class for nodes that output a collection of strings
+ */
+ StringCollectionOutput: {
/**
- * Canceled
- * @description Number of queue items with status 'canceled'
+ * Collection
+ * @description The output strings
*/
- canceled: number;
+ collection: string[];
/**
- * Total
- * @description Total number of queue items
+ * type
+ * @default string_collection_output
+ * @constant
*/
- total: number;
+ type: "string_collection_output";
};
/**
- * Show Image
- * @description Displays a provided image using the OS image viewer, and passes it forward in the pipeline.
+ * String Generator
+ * @description Generated a range of strings for use in a batched generation
*/
- ShowImageInvocation: {
+ StringGenerator: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -22322,110 +21784,117 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The image to show
- * @default null
+ * Generator Type
+ * @description The string generator.
*/
- image?: components["schemas"]["ImageField"] | null;
+ generator: components["schemas"]["StringGeneratorField"];
/**
* type
- * @default show_image
+ * @default string_generator
* @constant
*/
- type: "show_image";
+ type: "string_generator";
};
+ /** StringGeneratorField */
+ StringGeneratorField: Record;
/**
- * SigLIP_Diffusers_Config
- * @description Model config for SigLIP.
+ * StringGeneratorOutput
+ * @description Base class for nodes that output a collection of strings
*/
- SigLIP_Diffusers_Config: {
+ StringGeneratorOutput: {
/**
- * Key
- * @description A unique key for this model.
+ * Strings
+ * @description The generated strings
*/
- key: string;
+ strings: string[];
/**
- * Hash
- * @description The hash of the model file(s).
+ * type
+ * @default string_generator_output
+ * @constant
*/
- hash: string;
+ type: "string_generator_output";
+ };
+ /**
+ * String Primitive
+ * @description A string primitive value
+ */
+ StringInvocation: {
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- path: string;
+ id: string;
/**
- * File Size
- * @description The size of the model in bytes.
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- file_size: number;
+ is_intermediate?: boolean;
/**
- * Name
- * @description Name of the model.
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- name: string;
+ use_cache?: boolean;
/**
- * Description
- * @description Model description
+ * Value
+ * @description The string value
+ * @default
*/
- description: string | null;
+ value?: string;
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * type
+ * @default string
+ * @constant
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ type: "string";
+ };
+ /**
+ * String Join
+ * @description Joins string left to string right
+ */
+ StringJoinInvocation: {
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- source_api_response: string | null;
+ id: string;
/**
- * Cover Image
- * @description Url for image to preview model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- cover_image: string | null;
+ is_intermediate?: boolean;
/**
- * Usage Info
- * @description Usage information for this model
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- usage_info: string | null;
+ use_cache?: boolean;
/**
- * Format
- * @default diffusers
- * @constant
+ * String Left
+ * @description String Left
+ * @default
*/
- format: "diffusers";
- /** @default */
- repo_variant: components["schemas"]["ModelRepoVariant"];
+ string_left?: string;
/**
- * Type
- * @default siglip
- * @constant
+ * String Right
+ * @description String Right
+ * @default
*/
- type: "siglip";
+ string_right?: string;
/**
- * Base
- * @default any
+ * type
+ * @default string_join
* @constant
*/
- base: "any";
+ type: "string_join";
};
/**
- * Image-to-Image (Autoscale)
- * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached.
+ * String Join Three
+ * @description Joins string left to string middle to string right
*/
- SpandrelImageToImageAutoscaleInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
- /**
- * @description Optional metadata to be saved with the image
- * @default null
- */
- metadata?: components["schemas"]["MetadataField"] | null;
+ StringJoinThreeInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -22444,56 +21913,74 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The input image
- * @default null
+ * String Left
+ * @description String Left
+ * @default
*/
- image?: components["schemas"]["ImageField"] | null;
+ string_left?: string;
/**
- * Image-to-Image Model
- * @description Image-to-Image model
- * @default null
+ * String Middle
+ * @description String Middle
+ * @default
*/
- image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null;
+ string_middle?: string;
/**
- * Tile Size
- * @description The tile size for tiled image-to-image. Set to 0 to disable tiling.
- * @default 512
+ * String Right
+ * @description String Right
+ * @default
*/
- tile_size?: number;
+ string_right?: string;
/**
* type
- * @default spandrel_image_to_image_autoscale
+ * @default string_join_three
* @constant
*/
- type: "spandrel_image_to_image_autoscale";
+ type: "string_join_three";
+ };
+ /**
+ * StringOutput
+ * @description Base class for nodes that output a single string
+ */
+ StringOutput: {
/**
- * Scale
- * @description The final scale of the output image. If the model does not upscale the image, this will be ignored.
- * @default 4
+ * Value
+ * @description The output string
*/
- scale?: number;
+ value: string;
/**
- * Fit To Multiple Of 8
- * @description If true, the output image will be resized to the nearest multiple of 8 in both dimensions.
- * @default false
+ * type
+ * @default string_output
+ * @constant
*/
- fit_to_multiple_of_8?: boolean;
+ type: "string_output";
};
/**
- * Image-to-Image
- * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel).
+ * StringPosNegOutput
+ * @description Base class for invocations that output a positive and negative string
*/
- SpandrelImageToImageInvocation: {
+ StringPosNegOutput: {
/**
- * @description The board to save the image to
- * @default null
+ * Positive String
+ * @description Positive string
*/
- board?: components["schemas"]["BoardField"] | null;
+ positive_string: string;
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Negative String
+ * @description Negative string
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ negative_string: string;
+ /**
+ * type
+ * @default string_pos_neg_output
+ * @constant
+ */
+ type: "string_pos_neg_output";
+ };
+ /**
+ * String Replace
+ * @description Replaces the search string with the replace string
+ */
+ StringReplaceInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -22512,220 +21999,153 @@ export type components = {
*/
use_cache?: boolean;
/**
- * @description The input image
- * @default null
+ * String
+ * @description String to work on
+ * @default
*/
- image?: components["schemas"]["ImageField"] | null;
+ string?: string;
/**
- * Image-to-Image Model
- * @description Image-to-Image model
- * @default null
+ * Search String
+ * @description String to search for
+ * @default
*/
- image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null;
+ search_string?: string;
/**
- * Tile Size
- * @description The tile size for tiled image-to-image. Set to 0 to disable tiling.
- * @default 512
+ * Replace String
+ * @description String to replace the search
+ * @default
*/
- tile_size?: number;
+ replace_string?: string;
+ /**
+ * Use Regex
+ * @description Use search string as a regex expression (non regex is case insensitive)
+ * @default false
+ */
+ use_regex?: boolean;
/**
* type
- * @default spandrel_image_to_image
+ * @default string_replace
* @constant
*/
- type: "spandrel_image_to_image";
+ type: "string_replace";
};
/**
- * Spandrel_Checkpoint_Config
- * @description Model config for Spandrel Image to Image models.
+ * String Split
+ * @description Splits string into two strings, based on the first occurance of the delimiter. The delimiter will be removed from the string
*/
- Spandrel_Checkpoint_Config: {
+ StringSplitInvocation: {
/**
- * Key
- * @description A unique key for this model.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- key: string;
+ id: string;
/**
- * Hash
- * @description The hash of the model file(s).
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- hash: string;
+ is_intermediate?: boolean;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- path: string;
+ use_cache?: boolean;
/**
- * File Size
- * @description The size of the model in bytes.
+ * String
+ * @description String to split
+ * @default
*/
- file_size: number;
+ string?: string;
/**
- * Name
- * @description Name of the model.
+ * Delimiter
+ * @description Delimiter to spilt with. blank will split on the first whitespace
+ * @default
*/
- name: string;
+ delimiter?: string;
/**
- * Description
- * @description Model description
+ * type
+ * @default string_split
+ * @constant
*/
- description: string | null;
+ type: "string_split";
+ };
+ /**
+ * String Split Negative
+ * @description Splits string into two strings, inside [] goes into negative string everthing else goes into positive string. Each [ and ] character is replaced with a space
+ */
+ StringSplitNegInvocation: {
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- cover_image: string | null;
+ id: string;
/**
- * Usage Info
- * @description Usage information for this model
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- usage_info: string | null;
+ is_intermediate?: boolean;
/**
- * Base
- * @default any
- * @constant
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- base: "any";
+ use_cache?: boolean;
/**
- * Type
- * @default spandrel_image_to_image
- * @constant
+ * String
+ * @description String to split
+ * @default
*/
- type: "spandrel_image_to_image";
+ string?: string;
/**
- * Format
- * @default checkpoint
+ * type
+ * @default string_split_neg
* @constant
*/
- format: "checkpoint";
- };
- /** StarredImagesResult */
- StarredImagesResult: {
- /**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
- */
- affected_boards: string[];
- /**
- * Starred Images
- * @description The names of the images that were starred
- */
- starred_images: string[];
- };
- /** StarredVideosResult */
- StarredVideosResult: {
- /**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
- */
- affected_boards: string[];
- /**
- * Starred Videos
- * @description The ids of the videos that were starred
- */
- starred_videos: string[];
+ type: "string_split_neg";
};
- /** StarterModel */
- StarterModel: {
- /** Description */
- description: string;
- /** Source */
- source: string;
- /** Name */
- name: string;
- base: components["schemas"]["BaseModelType"];
- type: components["schemas"]["ModelType"];
- format?: components["schemas"]["ModelFormat"] | null;
- /**
- * Is Installed
- * @default false
- */
- is_installed?: boolean;
+ /** StylePresetRecordWithImage */
+ StylePresetRecordWithImage: {
/**
- * Previous Names
- * @default []
+ * Name
+ * @description The name of the style preset.
*/
- previous_names?: string[];
- /** Dependencies */
- dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null;
- };
- /** StarterModelBundle */
- StarterModelBundle: {
- /** Name */
- name: string;
- /** Models */
- models: components["schemas"]["StarterModel"][];
- };
- /** StarterModelResponse */
- StarterModelResponse: {
- /** Starter Models */
- starter_models: components["schemas"]["StarterModel"][];
- /** Starter Bundles */
- starter_bundles: {
- [key: string]: components["schemas"]["StarterModelBundle"];
- };
- };
- /** StarterModelWithoutDependencies */
- StarterModelWithoutDependencies: {
- /** Description */
- description: string;
- /** Source */
- source: string;
- /** Name */
name: string;
- base: components["schemas"]["BaseModelType"];
- type: components["schemas"]["ModelType"];
- format?: components["schemas"]["ModelFormat"] | null;
+ /** @description The preset data */
+ preset_data: components["schemas"]["PresetData"];
+ /** @description The type of style preset */
+ type: components["schemas"]["PresetType"];
/**
- * Is Installed
- * @default false
+ * Id
+ * @description The style preset ID.
*/
- is_installed?: boolean;
+ id: string;
/**
- * Previous Names
- * @default []
+ * Image
+ * @description The path for image
*/
- previous_names?: string[];
+ image: string | null;
};
/**
- * String2Output
- * @description Base class for invocations that output two strings
+ * SubModelType
+ * @description Submodel type.
+ * @enum {string}
*/
- String2Output: {
- /**
- * String 1
- * @description string 1
- */
- string_1: string;
- /**
- * String 2
- * @description string 2
- */
- string_2: string;
- /**
- * type
- * @default string_2_output
- * @constant
- */
- type: "string_2_output";
+ SubModelType: "unet" | "transformer" | "text_encoder" | "text_encoder_2" | "text_encoder_3" | "tokenizer" | "tokenizer_2" | "tokenizer_3" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker";
+ /** SubmodelDefinition */
+ SubmodelDefinition: {
+ /** Path Or Prefix */
+ path_or_prefix: string;
+ model_type: components["schemas"]["ModelType"];
+ /** Variant */
+ variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null;
};
/**
- * String Batch
- * @description Create a batched generation, where the workflow is executed once for each string in the batch.
+ * Subtract Integers
+ * @description Subtracts two numbers
*/
- StringBatchInvocation: {
+ SubtractInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -22744,30 +22164,61 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Batch Group
- * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size.
- * @default None
- * @enum {string}
+ * A
+ * @description The first number
+ * @default 0
*/
- batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5";
+ a?: number;
/**
- * Strings
- * @description The strings to batch over
- * @default null
+ * B
+ * @description The second number
+ * @default 0
*/
- strings?: string[] | null;
+ b?: number;
/**
* type
- * @default string_batch
+ * @default sub
* @constant
*/
- type: "string_batch";
+ type: "sub";
+ };
+ /** T2IAdapterField */
+ T2IAdapterField: {
+ /** @description The T2I-Adapter image prompt. */
+ image: components["schemas"]["ImageField"];
+ /** @description The T2I-Adapter model to use. */
+ t2i_adapter_model: components["schemas"]["ModelIdentifierField"];
+ /**
+ * Weight
+ * @description The weight given to the T2I-Adapter
+ * @default 1
+ */
+ weight?: number | number[];
+ /**
+ * Begin Step Percent
+ * @description When the T2I-Adapter is first applied (% of total steps)
+ * @default 0
+ */
+ begin_step_percent?: number;
+ /**
+ * End Step Percent
+ * @description When the T2I-Adapter is last applied (% of total steps)
+ * @default 1
+ */
+ end_step_percent?: number;
+ /**
+ * Resize Mode
+ * @description The resize mode to use
+ * @default just_resize
+ * @enum {string}
+ */
+ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
};
/**
- * String Collection Primitive
- * @description A collection of string primitive values
+ * T2I-Adapter - SD1.5, SDXL
+ * @description Collects T2I-Adapter info to pass to other nodes.
*/
- StringCollectionInvocation: {
+ T2IAdapterInvocation: {
/**
* Id
* @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -22786,606 +22237,409 @@ export type components = {
*/
use_cache?: boolean;
/**
- * Collection
- * @description The collection of string values
- * @default []
+ * @description The IP-Adapter image prompt.
+ * @default null
*/
- collection?: string[];
+ image?: components["schemas"]["ImageField"] | null;
/**
- * type
- * @default string_collection
- * @constant
+ * T2I-Adapter Model
+ * @description The T2I-Adapter model.
+ * @default null
*/
- type: "string_collection";
- };
- /**
- * StringCollectionOutput
- * @description Base class for nodes that output a collection of strings
- */
- StringCollectionOutput: {
+ t2i_adapter_model?: components["schemas"]["ModelIdentifierField"] | null;
/**
- * Collection
- * @description The output strings
+ * Weight
+ * @description The weight given to the T2I-Adapter
+ * @default 1
*/
- collection: string[];
+ weight?: number | number[];
/**
- * type
- * @default string_collection_output
- * @constant
- */
- type: "string_collection_output";
- };
- /**
- * String Generator
- * @description Generated a range of strings for use in a batched generation
- */
- StringGenerator: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
- /**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Begin Step Percent
+ * @description When the T2I-Adapter is first applied (% of total steps)
+ * @default 0
*/
- is_intermediate?: boolean;
+ begin_step_percent?: number;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * End Step Percent
+ * @description When the T2I-Adapter is last applied (% of total steps)
+ * @default 1
*/
- use_cache?: boolean;
+ end_step_percent?: number;
/**
- * Generator Type
- * @description The string generator.
+ * Resize Mode
+ * @description The resize mode applied to the T2I-Adapter input image so that it matches the target output size.
+ * @default just_resize
+ * @enum {string}
*/
- generator: components["schemas"]["StringGeneratorField"];
+ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
/**
* type
- * @default string_generator
+ * @default t2i_adapter
* @constant
*/
- type: "string_generator";
+ type: "t2i_adapter";
};
- /** StringGeneratorField */
- StringGeneratorField: Record;
- /**
- * StringGeneratorOutput
- * @description Base class for nodes that output a collection of strings
- */
- StringGeneratorOutput: {
+ /** T2IAdapterMetadataField */
+ T2IAdapterMetadataField: {
+ /** @description The control image. */
+ image: components["schemas"]["ImageField"];
/**
- * Strings
- * @description The generated strings
+ * @description The control image, after processing.
+ * @default null
*/
- strings: string[];
+ processed_image?: components["schemas"]["ImageField"] | null;
+ /** @description The T2I-Adapter model to use. */
+ t2i_adapter_model: components["schemas"]["ModelIdentifierField"];
/**
- * type
- * @default string_generator_output
- * @constant
+ * Weight
+ * @description The weight given to the T2I-Adapter
+ * @default 1
*/
- type: "string_generator_output";
- };
- /**
- * String Primitive
- * @description A string primitive value
- */
- StringInvocation: {
+ weight?: number | number[];
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Begin Step Percent
+ * @description When the T2I-Adapter is first applied (% of total steps)
+ * @default 0
*/
- id: string;
+ begin_step_percent?: number;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * End Step Percent
+ * @description When the T2I-Adapter is last applied (% of total steps)
+ * @default 1
*/
- is_intermediate?: boolean;
+ end_step_percent?: number;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Resize Mode
+ * @description The resize mode to use
+ * @default just_resize
+ * @enum {string}
*/
- use_cache?: boolean;
+ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
+ };
+ /** T2IAdapterOutput */
+ T2IAdapterOutput: {
/**
- * Value
- * @description The string value
- * @default
+ * T2I Adapter
+ * @description T2I-Adapter(s) to apply
*/
- value?: string;
+ t2i_adapter: components["schemas"]["T2IAdapterField"];
/**
* type
- * @default string
+ * @default t2i_adapter_output
* @constant
*/
- type: "string";
+ type: "t2i_adapter_output";
};
- /**
- * String Join
- * @description Joins string left to string right
- */
- StringJoinInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
+ /** T2IAdapter_Diffusers_SD1_Config */
+ T2IAdapter_Diffusers_SD1_Config: {
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Key
+ * @description A unique key for this model.
*/
- is_intermediate?: boolean;
+ key: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Hash
+ * @description The hash of the model file(s).
*/
- use_cache?: boolean;
+ hash: string;
/**
- * String Left
- * @description String Left
- * @default
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- string_left?: string;
+ path: string;
/**
- * String Right
- * @description String Right
- * @default
+ * File Size
+ * @description The size of the model in bytes.
*/
- string_right?: string;
+ file_size: number;
/**
- * type
- * @default string_join
- * @constant
+ * Name
+ * @description Name of the model.
*/
- type: "string_join";
- };
- /**
- * String Join Three
- * @description Joins string left to string middle to string right
- */
- StringJoinThreeInvocation: {
+ name: string;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Description
+ * @description Model description
*/
- id: string;
+ description: string | null;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- is_intermediate?: boolean;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- use_cache?: boolean;
+ source_api_response: string | null;
/**
- * String Left
- * @description String Left
- * @default
+ * Cover Image
+ * @description Url for image to preview model
*/
- string_left?: string;
+ cover_image: string | null;
/**
- * String Middle
- * @description String Middle
- * @default
+ * Format
+ * @default diffusers
+ * @constant
*/
- string_middle?: string;
+ format: "diffusers";
+ /** @default */
+ repo_variant: components["schemas"]["ModelRepoVariant"];
/**
- * String Right
- * @description String Right
- * @default
+ * Type
+ * @default t2i_adapter
+ * @constant
*/
- string_right?: string;
+ type: "t2i_adapter";
+ default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null;
/**
- * type
- * @default string_join_three
+ * Base
+ * @default sd-1
* @constant
*/
- type: "string_join_three";
+ base: "sd-1";
};
- /**
- * StringOutput
- * @description Base class for nodes that output a single string
- */
- StringOutput: {
+ /** T2IAdapter_Diffusers_SDXL_Config */
+ T2IAdapter_Diffusers_SDXL_Config: {
/**
- * Value
- * @description The output string
+ * Key
+ * @description A unique key for this model.
*/
- value: string;
+ key: string;
/**
- * type
- * @default string_output
- * @constant
+ * Hash
+ * @description The hash of the model file(s).
*/
- type: "string_output";
- };
- /**
- * StringPosNegOutput
- * @description Base class for invocations that output a positive and negative string
- */
- StringPosNegOutput: {
+ hash: string;
/**
- * Positive String
- * @description Positive string
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- positive_string: string;
+ path: string;
/**
- * Negative String
- * @description Negative string
+ * File Size
+ * @description The size of the model in bytes.
*/
- negative_string: string;
+ file_size: number;
/**
- * type
- * @default string_pos_neg_output
- * @constant
+ * Name
+ * @description Name of the model.
*/
- type: "string_pos_neg_output";
- };
- /**
- * String Replace
- * @description Replaces the search string with the replace string
- */
- StringReplaceInvocation: {
+ name: string;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Description
+ * @description Model description
*/
- id: string;
+ description: string | null;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- is_intermediate?: boolean;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- use_cache?: boolean;
+ source_api_response: string | null;
/**
- * String
- * @description String to work on
- * @default
+ * Cover Image
+ * @description Url for image to preview model
*/
- string?: string;
+ cover_image: string | null;
/**
- * Search String
- * @description String to search for
- * @default
+ * Format
+ * @default diffusers
+ * @constant
*/
- search_string?: string;
+ format: "diffusers";
+ /** @default */
+ repo_variant: components["schemas"]["ModelRepoVariant"];
/**
- * Replace String
- * @description String to replace the search
- * @default
+ * Type
+ * @default t2i_adapter
+ * @constant
*/
- replace_string?: string;
+ type: "t2i_adapter";
+ default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null;
/**
- * Use Regex
- * @description Use search string as a regex expression (non regex is case insensitive)
- * @default false
+ * Base
+ * @default sdxl
+ * @constant
*/
- use_regex?: boolean;
+ base: "sdxl";
+ };
+ /** T5EncoderField */
+ T5EncoderField: {
+ /** @description Info to load tokenizer submodel */
+ tokenizer: components["schemas"]["ModelIdentifierField"];
+ /** @description Info to load text_encoder submodel */
+ text_encoder: components["schemas"]["ModelIdentifierField"];
/**
- * type
- * @default string_replace
- * @constant
+ * Loras
+ * @description LoRAs to apply on model loading
*/
- type: "string_replace";
+ loras: components["schemas"]["LoRAField"][];
};
/**
- * String Split
- * @description Splits string into two strings, based on the first occurance of the delimiter. The delimiter will be removed from the string
+ * T5Encoder_BnBLLMint8_Config
+ * @description Configuration for T5 Encoder models quantized by bitsandbytes' LLM.int8.
*/
- StringSplitInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
+ T5Encoder_BnBLLMint8_Config: {
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Key
+ * @description A unique key for this model.
*/
- is_intermediate?: boolean;
+ key: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Hash
+ * @description The hash of the model file(s).
*/
- use_cache?: boolean;
+ hash: string;
/**
- * String
- * @description String to split
- * @default
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- string?: string;
+ path: string;
/**
- * Delimiter
- * @description Delimiter to spilt with. blank will split on the first whitespace
- * @default
+ * File Size
+ * @description The size of the model in bytes.
*/
- delimiter?: string;
+ file_size: number;
/**
- * type
- * @default string_split
- * @constant
+ * Name
+ * @description Name of the model.
*/
- type: "string_split";
- };
- /**
- * String Split Negative
- * @description Splits string into two strings, inside [] goes into negative string everthing else goes into positive string. Each [ and ] character is replaced with a space
- */
- StringSplitNegInvocation: {
+ name: string;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Description
+ * @description Model description
*/
- id: string;
+ description: string | null;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- is_intermediate?: boolean;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- use_cache?: boolean;
+ source_api_response: string | null;
/**
- * String
- * @description String to split
- * @default
+ * Cover Image
+ * @description Url for image to preview model
*/
- string?: string;
+ cover_image: string | null;
/**
- * type
- * @default string_split_neg
+ * Base
+ * @default any
* @constant
*/
- type: "string_split_neg";
- };
- /** StylePresetRecordWithImage */
- StylePresetRecordWithImage: {
- /**
- * Name
- * @description The name of the style preset.
- */
- name: string;
- /** @description The preset data */
- preset_data: components["schemas"]["PresetData"];
- /** @description The type of style preset */
- type: components["schemas"]["PresetType"];
+ base: "any";
/**
- * Id
- * @description The style preset ID.
+ * Type
+ * @default t5_encoder
+ * @constant
*/
- id: string;
+ type: "t5_encoder";
/**
- * Image
- * @description The path for image
+ * Format
+ * @default bnb_quantized_int8b
+ * @constant
*/
- image: string | null;
- };
- /**
- * SubModelType
- * @description Submodel type.
- * @enum {string}
- */
- SubModelType: "unet" | "transformer" | "text_encoder" | "text_encoder_2" | "text_encoder_3" | "tokenizer" | "tokenizer_2" | "tokenizer_3" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker";
- /** SubmodelDefinition */
- SubmodelDefinition: {
- /** Path Or Prefix */
- path_or_prefix: string;
- model_type: components["schemas"]["ModelType"];
- /** Variant */
- variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null;
+ format: "bnb_quantized_int8b";
};
/**
- * Subtract Integers
- * @description Subtracts two numbers
+ * T5Encoder_T5Encoder_Config
+ * @description Configuration for T5 Encoder models in a bespoke, diffusers-like format. The model weights are expected to be in
+ * a folder called text_encoder_2 inside the model directory, with a config file named model.safetensors.index.json.
*/
- SubtractInvocation: {
+ T5Encoder_T5Encoder_Config: {
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Key
+ * @description A unique key for this model.
*/
- id: string;
+ key: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Hash
+ * @description The hash of the model file(s).
*/
- is_intermediate?: boolean;
+ hash: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- use_cache?: boolean;
+ path: string;
/**
- * A
- * @description The first number
- * @default 0
+ * File Size
+ * @description The size of the model in bytes.
*/
- a?: number;
+ file_size: number;
/**
- * B
- * @description The second number
- * @default 0
+ * Name
+ * @description Name of the model.
*/
- b?: number;
+ name: string;
/**
- * type
- * @default sub
- * @constant
- */
- type: "sub";
- };
- /** T2IAdapterField */
- T2IAdapterField: {
- /** @description The T2I-Adapter image prompt. */
- image: components["schemas"]["ImageField"];
- /** @description The T2I-Adapter model to use. */
- t2i_adapter_model: components["schemas"]["ModelIdentifierField"];
- /**
- * Weight
- * @description The weight given to the T2I-Adapter
- * @default 1
- */
- weight?: number | number[];
- /**
- * Begin Step Percent
- * @description When the T2I-Adapter is first applied (% of total steps)
- * @default 0
- */
- begin_step_percent?: number;
- /**
- * End Step Percent
- * @description When the T2I-Adapter is last applied (% of total steps)
- * @default 1
- */
- end_step_percent?: number;
- /**
- * Resize Mode
- * @description The resize mode to use
- * @default just_resize
- * @enum {string}
- */
- resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
- };
- /**
- * T2I-Adapter - SD1.5, SDXL
- * @description Collects T2I-Adapter info to pass to other nodes.
- */
- T2IAdapterInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
- /**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
- */
- is_intermediate?: boolean;
- /**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
- */
- use_cache?: boolean;
- /**
- * @description The IP-Adapter image prompt.
- * @default null
- */
- image?: components["schemas"]["ImageField"] | null;
- /**
- * T2I-Adapter Model
- * @description The T2I-Adapter model.
- * @default null
- */
- t2i_adapter_model?: components["schemas"]["ModelIdentifierField"] | null;
- /**
- * Weight
- * @description The weight given to the T2I-Adapter
- * @default 1
+ * Description
+ * @description Model description
*/
- weight?: number | number[];
+ description: string | null;
/**
- * Begin Step Percent
- * @description When the T2I-Adapter is first applied (% of total steps)
- * @default 0
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- begin_step_percent?: number;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * End Step Percent
- * @description When the T2I-Adapter is last applied (% of total steps)
- * @default 1
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- end_step_percent?: number;
+ source_api_response: string | null;
/**
- * Resize Mode
- * @description The resize mode applied to the T2I-Adapter input image so that it matches the target output size.
- * @default just_resize
- * @enum {string}
+ * Cover Image
+ * @description Url for image to preview model
*/
- resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
+ cover_image: string | null;
/**
- * type
- * @default t2i_adapter
+ * Base
+ * @default any
* @constant
*/
- type: "t2i_adapter";
- };
- /** T2IAdapterMetadataField */
- T2IAdapterMetadataField: {
- /** @description The control image. */
- image: components["schemas"]["ImageField"];
- /**
- * @description The control image, after processing.
- * @default null
- */
- processed_image?: components["schemas"]["ImageField"] | null;
- /** @description The T2I-Adapter model to use. */
- t2i_adapter_model: components["schemas"]["ModelIdentifierField"];
- /**
- * Weight
- * @description The weight given to the T2I-Adapter
- * @default 1
- */
- weight?: number | number[];
- /**
- * Begin Step Percent
- * @description When the T2I-Adapter is first applied (% of total steps)
- * @default 0
- */
- begin_step_percent?: number;
- /**
- * End Step Percent
- * @description When the T2I-Adapter is last applied (% of total steps)
- * @default 1
- */
- end_step_percent?: number;
- /**
- * Resize Mode
- * @description The resize mode to use
- * @default just_resize
- * @enum {string}
- */
- resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
- };
- /** T2IAdapterOutput */
- T2IAdapterOutput: {
+ base: "any";
/**
- * T2I Adapter
- * @description T2I-Adapter(s) to apply
+ * Type
+ * @default t5_encoder
+ * @constant
*/
- t2i_adapter: components["schemas"]["T2IAdapterField"];
+ type: "t5_encoder";
/**
- * type
- * @default t2i_adapter_output
+ * Format
+ * @default t5_encoder
* @constant
*/
- type: "t2i_adapter_output";
+ format: "t5_encoder";
};
- /** T2IAdapter_Diffusers_SD1_Config */
- T2IAdapter_Diffusers_SD1_Config: {
+ /** TBLR */
+ TBLR: {
+ /** Top */
+ top: number;
+ /** Bottom */
+ bottom: number;
+ /** Left */
+ left: number;
+ /** Right */
+ right: number;
+ };
+ /** TI_File_SD1_Config */
+ TI_File_SD1_Config: {
/**
* Key
* @description A unique key for this model.
@@ -23434,25 +22688,17 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Format
- * @default diffusers
+ * Type
+ * @default embedding
* @constant
*/
- format: "diffusers";
- /** @default */
- repo_variant: components["schemas"]["ModelRepoVariant"];
+ type: "embedding";
/**
- * Type
- * @default t2i_adapter
+ * Format
+ * @default embedding_file
* @constant
*/
- type: "t2i_adapter";
- default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null;
+ format: "embedding_file";
/**
* Base
* @default sd-1
@@ -23460,8 +22706,8 @@ export type components = {
*/
base: "sd-1";
};
- /** T2IAdapter_Diffusers_SDXL_Config */
- T2IAdapter_Diffusers_SDXL_Config: {
+ /** TI_File_SD2_Config */
+ TI_File_SD2_Config: {
/**
* Key
* @description A unique key for this model.
@@ -23510,49 +22756,26 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Format
- * @default diffusers
+ * Type
+ * @default embedding
* @constant
*/
- format: "diffusers";
- /** @default */
- repo_variant: components["schemas"]["ModelRepoVariant"];
+ type: "embedding";
/**
- * Type
- * @default t2i_adapter
+ * Format
+ * @default embedding_file
* @constant
*/
- type: "t2i_adapter";
- default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null;
+ format: "embedding_file";
/**
* Base
- * @default sdxl
+ * @default sd-2
* @constant
*/
- base: "sdxl";
- };
- /** T5EncoderField */
- T5EncoderField: {
- /** @description Info to load tokenizer submodel */
- tokenizer: components["schemas"]["ModelIdentifierField"];
- /** @description Info to load text_encoder submodel */
- text_encoder: components["schemas"]["ModelIdentifierField"];
- /**
- * Loras
- * @description LoRAs to apply on model loading
- */
- loras: components["schemas"]["LoRAField"][];
+ base: "sd-2";
};
- /**
- * T5Encoder_BnBLLMint8_Config
- * @description Configuration for T5 Encoder models quantized by bitsandbytes' LLM.int8.
- */
- T5Encoder_BnBLLMint8_Config: {
+ /** TI_File_SDXL_Config */
+ TI_File_SDXL_Config: {
/**
* Key
* @description A unique key for this model.
@@ -23601,35 +22824,26 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Base
- * @default any
+ * Type
+ * @default embedding
* @constant
*/
- base: "any";
+ type: "embedding";
/**
- * Type
- * @default t5_encoder
+ * Format
+ * @default embedding_file
* @constant
*/
- type: "t5_encoder";
+ format: "embedding_file";
/**
- * Format
- * @default bnb_quantized_int8b
+ * Base
+ * @default sdxl
* @constant
*/
- format: "bnb_quantized_int8b";
+ base: "sdxl";
};
- /**
- * T5Encoder_T5Encoder_Config
- * @description Configuration for T5 Encoder models in a bespoke, diffusers-like format. The model weights are expected to be in
- * a folder called text_encoder_2 inside the model directory, with a config file named model.safetensors.index.json.
- */
- T5Encoder_T5Encoder_Config: {
+ /** TI_Folder_SD1_Config */
+ TI_Folder_SD1_Config: {
/**
* Key
* @description A unique key for this model.
@@ -23678,42 +22892,26 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Base
- * @default any
+ * Type
+ * @default embedding
* @constant
*/
- base: "any";
+ type: "embedding";
/**
- * Type
- * @default t5_encoder
+ * Format
+ * @default embedding_folder
* @constant
*/
- type: "t5_encoder";
+ format: "embedding_folder";
/**
- * Format
- * @default t5_encoder
+ * Base
+ * @default sd-1
* @constant
*/
- format: "t5_encoder";
- };
- /** TBLR */
- TBLR: {
- /** Top */
- top: number;
- /** Bottom */
- bottom: number;
- /** Left */
- left: number;
- /** Right */
- right: number;
+ base: "sd-1";
};
- /** TI_File_SD1_Config */
- TI_File_SD1_Config: {
+ /** TI_Folder_SD2_Config */
+ TI_Folder_SD2_Config: {
/**
* Key
* @description A unique key for this model.
@@ -23761,11 +22959,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default embedding
@@ -23774,19 +22967,19 @@ export type components = {
type: "embedding";
/**
* Format
- * @default embedding_file
+ * @default embedding_folder
* @constant
*/
- format: "embedding_file";
+ format: "embedding_folder";
/**
* Base
- * @default sd-1
+ * @default sd-2
* @constant
*/
- base: "sd-1";
+ base: "sd-2";
};
- /** TI_File_SD2_Config */
- TI_File_SD2_Config: {
+ /** TI_Folder_SDXL_Config */
+ TI_Folder_SDXL_Config: {
/**
* Key
* @description A unique key for this model.
@@ -23834,11 +23027,6 @@ export type components = {
* @description Url for image to preview model
*/
cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
/**
* Type
* @default embedding
@@ -23847,165 +23035,415 @@ export type components = {
type: "embedding";
/**
* Format
- * @default embedding_file
+ * @default embedding_folder
* @constant
*/
- format: "embedding_file";
+ format: "embedding_folder";
/**
* Base
- * @default sd-2
+ * @default sdxl
* @constant
*/
- base: "sd-2";
+ base: "sdxl";
};
- /** TI_File_SDXL_Config */
- TI_File_SDXL_Config: {
+ /**
+ * TensorField
+ * @description A tensor primitive field.
+ */
+ TensorField: {
/**
- * Key
- * @description A unique key for this model.
+ * Tensor Name
+ * @description The name of a tensor.
*/
- key: string;
+ tensor_name: string;
+ };
+ /** Tile */
+ Tile: {
+ /** @description The coordinates of this tile relative to its parent image. */
+ coords: components["schemas"]["TBLR"];
+ /** @description The amount of overlap with adjacent tiles on each side of this tile. */
+ overlap: components["schemas"]["TBLR"];
+ };
+ /**
+ * Tile to Properties
+ * @description Split a Tile into its individual properties.
+ */
+ TileToPropertiesInvocation: {
/**
- * Hash
- * @description The hash of the model file(s).
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- hash: string;
+ id: string;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- path: string;
+ is_intermediate?: boolean;
/**
- * File Size
- * @description The size of the model in bytes.
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- file_size: number;
+ use_cache?: boolean;
/**
- * Name
- * @description Name of the model.
+ * @description The tile to split into properties.
+ * @default null
*/
- name: string;
+ tile?: components["schemas"]["Tile"] | null;
/**
- * Description
- * @description Model description
+ * type
+ * @default tile_to_properties
+ * @constant
*/
- description: string | null;
+ type: "tile_to_properties";
+ };
+ /** TileToPropertiesOutput */
+ TileToPropertiesOutput: {
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * Coords Left
+ * @description Left coordinate of the tile relative to its parent image.
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ coords_left: number;
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * Coords Right
+ * @description Right coordinate of the tile relative to its parent image.
*/
- source_api_response: string | null;
+ coords_right: number;
/**
- * Cover Image
- * @description Url for image to preview model
+ * Coords Top
+ * @description Top coordinate of the tile relative to its parent image.
*/
- cover_image: string | null;
+ coords_top: number;
/**
- * Usage Info
- * @description Usage information for this model
+ * Coords Bottom
+ * @description Bottom coordinate of the tile relative to its parent image.
*/
- usage_info: string | null;
+ coords_bottom: number;
/**
- * Type
- * @default embedding
- * @constant
+ * Width
+ * @description The width of the tile. Equal to coords_right - coords_left.
*/
- type: "embedding";
+ width: number;
/**
- * Format
- * @default embedding_file
- * @constant
+ * Height
+ * @description The height of the tile. Equal to coords_bottom - coords_top.
*/
- format: "embedding_file";
+ height: number;
/**
- * Base
- * @default sdxl
- * @constant
+ * Overlap Top
+ * @description Overlap between this tile and its top neighbor.
*/
- base: "sdxl";
- };
- /** TI_Folder_SD1_Config */
- TI_Folder_SD1_Config: {
+ overlap_top: number;
/**
- * Key
- * @description A unique key for this model.
+ * Overlap Bottom
+ * @description Overlap between this tile and its bottom neighbor.
*/
- key: string;
+ overlap_bottom: number;
/**
- * Hash
- * @description The hash of the model file(s).
+ * Overlap Left
+ * @description Overlap between this tile and its left neighbor.
*/
- hash: string;
+ overlap_left: number;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Overlap Right
+ * @description Overlap between this tile and its right neighbor.
*/
- path: string;
+ overlap_right: number;
/**
- * File Size
- * @description The size of the model in bytes.
+ * type
+ * @default tile_to_properties_output
+ * @constant
*/
- file_size: number;
+ type: "tile_to_properties_output";
+ };
+ /** TileWithImage */
+ TileWithImage: {
+ tile: components["schemas"]["Tile"];
+ image: components["schemas"]["ImageField"];
+ };
+ /**
+ * Tiled Multi-Diffusion Denoise - SD1.5, SDXL
+ * @description Tiled Multi-Diffusion denoising.
+ *
+ * This node handles automatically tiling the input image, and is primarily intended for global refinement of images
+ * in tiled upscaling workflows. Future Multi-Diffusion nodes should allow the user to specify custom regions with
+ * different parameters for each region to harness the full power of Multi-Diffusion.
+ *
+ * This node has a similar interface to the `DenoiseLatents` node, but it has a reduced feature set (no IP-Adapter,
+ * T2I-Adapter, masking, etc.).
+ */
+ TiledMultiDiffusionDenoiseLatents: {
/**
- * Name
- * @description Name of the model.
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/
- name: string;
+ id: string;
/**
- * Description
- * @description Model description
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
*/
- description: string | null;
+ is_intermediate?: boolean;
/**
- * Source
- * @description The original source of the model (path, URL or repo_id).
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
*/
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
+ use_cache?: boolean;
/**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
+ * @description Positive conditioning tensor
+ * @default null
*/
- source_api_response: string | null;
+ positive_conditioning?: components["schemas"]["ConditioningField"] | null;
/**
- * Cover Image
- * @description Url for image to preview model
+ * @description Negative conditioning tensor
+ * @default null
*/
- cover_image: string | null;
+ negative_conditioning?: components["schemas"]["ConditioningField"] | null;
/**
- * Usage Info
- * @description Usage information for this model
+ * @description Noise tensor
+ * @default null
*/
- usage_info: string | null;
+ noise?: components["schemas"]["LatentsField"] | null;
/**
- * Type
- * @default embedding
- * @constant
+ * @description Latents tensor
+ * @default null
*/
- type: "embedding";
+ latents?: components["schemas"]["LatentsField"] | null;
/**
- * Format
- * @default embedding_folder
+ * Tile Height
+ * @description Height of the tiles in image space.
+ * @default 1024
+ */
+ tile_height?: number;
+ /**
+ * Tile Width
+ * @description Width of the tiles in image space.
+ * @default 1024
+ */
+ tile_width?: number;
+ /**
+ * Tile Overlap
+ * @description The overlap between adjacent tiles in pixel space. (Of course, tile merging is applied in latent space.) Tiles will be cropped during merging (if necessary) to ensure that they overlap by exactly this amount.
+ * @default 32
+ */
+ tile_overlap?: number;
+ /**
+ * Steps
+ * @description Number of steps to run
+ * @default 18
+ */
+ steps?: number;
+ /**
+ * CFG Scale
+ * @description Classifier-Free Guidance scale
+ * @default 6
+ */
+ cfg_scale?: number | number[];
+ /**
+ * Denoising Start
+ * @description When to start denoising, expressed a percentage of total steps
+ * @default 0
+ */
+ denoising_start?: number;
+ /**
+ * Denoising End
+ * @description When to stop denoising, expressed a percentage of total steps
+ * @default 1
+ */
+ denoising_end?: number;
+ /**
+ * Scheduler
+ * @description Scheduler to use during inference
+ * @default euler
+ * @enum {string}
+ */
+ scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
+ /**
+ * UNet
+ * @description UNet (scheduler, LoRAs)
+ * @default null
+ */
+ unet?: components["schemas"]["UNetField"] | null;
+ /**
+ * CFG Rescale Multiplier
+ * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR
+ * @default 0
+ */
+ cfg_rescale_multiplier?: number;
+ /**
+ * Control
+ * @default null
+ */
+ control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null;
+ /**
+ * type
+ * @default tiled_multi_diffusion_denoise_latents
* @constant
*/
- format: "embedding_folder";
+ type: "tiled_multi_diffusion_denoise_latents";
+ };
+ /** TransformerField */
+ TransformerField: {
+ /** @description Info to load Transformer submodel */
+ transformer: components["schemas"]["ModelIdentifierField"];
/**
- * Base
- * @default sd-1
+ * Loras
+ * @description LoRAs to apply on model loading
+ */
+ loras: components["schemas"]["LoRAField"][];
+ };
+ /**
+ * UIComponent
+ * @description The type of UI component to use for a field, used to override the default components, which are
+ * inferred from the field type.
+ * @enum {string}
+ */
+ UIComponent: "none" | "textarea" | "slider";
+ /**
+ * UIConfigBase
+ * @description Provides additional node configuration to the UI.
+ * This is used internally by the @invocation decorator logic. Do not use this directly.
+ */
+ UIConfigBase: {
+ /**
+ * Tags
+ * @description The node's tags
+ * @default null
+ */
+ tags: string[] | null;
+ /**
+ * Title
+ * @description The node's display name
+ * @default null
+ */
+ title: string | null;
+ /**
+ * Category
+ * @description The node's category
+ * @default null
+ */
+ category: string | null;
+ /**
+ * Version
+ * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".
+ */
+ version: string;
+ /**
+ * Node Pack
+ * @description The node pack that this node belongs to, will be 'invokeai' for built-in nodes
+ */
+ node_pack: string;
+ /**
+ * @description The node's classification
+ * @default stable
+ */
+ classification: components["schemas"]["Classification"];
+ };
+ /**
+ * UIType
+ * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type.
+ *
+ * - Model Fields
+ * The most common node-author-facing use will be for model fields. Internally, there is no difference
+ * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the
+ * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that
+ * the field is an SDXL main model field.
+ *
+ * - Any Field
+ * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to
+ * indicate that the field accepts any type. Use with caution. This cannot be used on outputs.
+ *
+ * - Scheduler Field
+ * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field.
+ *
+ * - Internal Fields
+ * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate
+ * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These
+ * should not be used by node authors.
+ *
+ * - DEPRECATED Fields
+ * These types are deprecated and should not be used by node authors. A warning will be logged if one is
+ * used, and the type will be ignored. They are included here for backwards compatibility.
+ * @enum {string}
+ */
+ UIType: "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "IsIntermediate" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict" | "DEPRECATED_MainModelField" | "DEPRECATED_CogView4MainModelField" | "DEPRECATED_FluxMainModelField" | "DEPRECATED_SD3MainModelField" | "DEPRECATED_SDXLMainModelField" | "DEPRECATED_SDXLRefinerModelField" | "DEPRECATED_ONNXModelField" | "DEPRECATED_VAEModelField" | "DEPRECATED_FluxVAEModelField" | "DEPRECATED_LoRAModelField" | "DEPRECATED_ControlNetModelField" | "DEPRECATED_IPAdapterModelField" | "DEPRECATED_T2IAdapterModelField" | "DEPRECATED_T5EncoderModelField" | "DEPRECATED_CLIPEmbedModelField" | "DEPRECATED_CLIPLEmbedModelField" | "DEPRECATED_CLIPGEmbedModelField" | "DEPRECATED_SpandrelImageToImageModelField" | "DEPRECATED_ControlLoRAModelField" | "DEPRECATED_SigLipModelField" | "DEPRECATED_FluxReduxModelField" | "DEPRECATED_LLaVAModelField" | 
"DEPRECATED_Imagen3ModelField" | "DEPRECATED_Imagen4ModelField" | "DEPRECATED_ChatGPT4oModelField" | "DEPRECATED_Gemini2_5ModelField" | "DEPRECATED_FluxKontextModelField" | "DEPRECATED_Veo3ModelField" | "DEPRECATED_RunwayModelField";
+ /** UNetField */
+ UNetField: {
+ /** @description Info to load unet submodel */
+ unet: components["schemas"]["ModelIdentifierField"];
+ /** @description Info to load scheduler submodel */
+ scheduler: components["schemas"]["ModelIdentifierField"];
+ /**
+ * Loras
+ * @description LoRAs to apply on model loading
+ */
+ loras: components["schemas"]["LoRAField"][];
+ /**
+ * Seamless Axes
+ * @description Axes("x" and "y") to which apply seamless
+ */
+ seamless_axes?: string[];
+ /**
+ * @description FreeU configuration
+ * @default null
+ */
+ freeu_config?: components["schemas"]["FreeUConfig"] | null;
+ };
+ /**
+ * UNetOutput
+ * @description Base class for invocations that output a UNet field.
+ */
+ UNetOutput: {
+ /**
+ * UNet
+ * @description UNet (scheduler, LoRAs)
+ */
+ unet: components["schemas"]["UNetField"];
+ /**
+ * type
+ * @default unet_output
* @constant
*/
- base: "sd-1";
+ type: "unet_output";
};
- /** TI_Folder_SD2_Config */
- TI_Folder_SD2_Config: {
+ /**
+ * URLModelSource
+ * @description A generic URL point to a checkpoint file.
+ */
+ URLModelSource: {
+ /**
+ * Url
+ * Format: uri
+ */
+ url: string;
+ /** Access Token */
+ access_token?: string | null;
+ /**
+ * @description discriminator enum property added by openapi-typescript
+ * @enum {string}
+ */
+ type: "url";
+ };
+ /** URLRegexTokenPair */
+ URLRegexTokenPair: {
+ /**
+ * Url Regex
+ * @description Regular expression to match against the URL
+ */
+ url_regex: string;
+ /**
+ * Token
+ * @description Token to use when the URL matches the regex
+ */
+ token: string;
+ };
+ /**
+ * Unknown_Config
+ * @description Model config for unknown models, used as a fallback when we cannot positively identify a model.
+ */
+ Unknown_Config: {
/**
* Key
* @description A unique key for this model.
@@ -24054,31 +23492,157 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
+ * Base
+ * @default unknown
+ * @constant
*/
- usage_info: string | null;
+ base: "unknown";
/**
* Type
- * @default embedding
+ * @default unknown
* @constant
*/
- type: "embedding";
+ type: "unknown";
/**
* Format
- * @default embedding_folder
+ * @default unknown
* @constant
*/
- format: "embedding_folder";
- /**
- * Base
- * @default sd-2
+ format: "unknown";
+ };
+ /**
+ * Unsharp Mask
+ * @description Applies an unsharp mask filter to an image
+ */
+ UnsharpMaskInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to use
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * Radius
+ * @description Unsharp mask radius
+ * @default 2
+ */
+ radius?: number;
+ /**
+ * Strength
+ * @description Unsharp mask strength
+ * @default 50
+ */
+ strength?: number;
+ /**
+ * type
+ * @default unsharp_mask
* @constant
*/
- base: "sd-2";
+ type: "unsharp_mask";
};
- /** TI_Folder_SDXL_Config */
- TI_Folder_SDXL_Config: {
+ /** UnstarredImagesResult */
+ UnstarredImagesResult: {
+ /**
+ * Affected Boards
+ * @description The ids of boards affected by the delete operation
+ */
+ affected_boards: string[];
+ /**
+ * Unstarred Images
+ * @description The names of the images that were unstarred
+ */
+ unstarred_images: string[];
+ };
+ /** VAEField */
+ VAEField: {
+ /** @description Info to load vae submodel */
+ vae: components["schemas"]["ModelIdentifierField"];
+ /**
+ * Seamless Axes
+ * @description Axes("x" and "y") to which apply seamless
+ */
+ seamless_axes?: string[];
+ };
+ /**
+ * VAE Model - SD1.5, SD2, SDXL, SD3, FLUX
+ * @description Loads a VAE model, outputting a VaeLoaderOutput
+ */
+ VAELoaderInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * VAE
+ * @description VAE model to load
+ * @default null
+ */
+ vae_model?: components["schemas"]["ModelIdentifierField"] | null;
+ /**
+ * type
+ * @default vae_loader
+ * @constant
+ */
+ type: "vae_loader";
+ };
+ /**
+ * VAEOutput
+ * @description Base class for invocations that output a VAE field
+ */
+ VAEOutput: {
+ /**
+ * VAE
+ * @description VAE
+ */
+ vae: components["schemas"]["VAEField"];
+ /**
+ * type
+ * @default vae_output
+ * @constant
+ */
+ type: "vae_output";
+ };
+ /** VAE_Checkpoint_FLUX_Config */
+ VAE_Checkpoint_FLUX_Config: {
/**
* Key
* @description A unique key for this model.
@@ -24127,427 +23691,320 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
+ * Config Path
+ * @description Path to the config for this model, if any.
*/
- usage_info: string | null;
+ config_path: string | null;
/**
* Type
- * @default embedding
+ * @default vae
* @constant
*/
- type: "embedding";
+ type: "vae";
/**
* Format
- * @default embedding_folder
+ * @default checkpoint
* @constant
*/
- format: "embedding_folder";
+ format: "checkpoint";
/**
* Base
- * @default sdxl
+ * @default flux
* @constant
*/
- base: "sdxl";
+ base: "flux";
};
- /**
- * TensorField
- * @description A tensor primitive field.
- */
- TensorField: {
+ /** VAE_Checkpoint_SD1_Config */
+ VAE_Checkpoint_SD1_Config: {
/**
- * Tensor Name
- * @description The name of a tensor.
+ * Key
+ * @description A unique key for this model.
*/
- tensor_name: string;
- };
- /** Tile */
- Tile: {
- /** @description The coordinates of this tile relative to its parent image. */
- coords: components["schemas"]["TBLR"];
- /** @description The amount of overlap with adjacent tiles on each side of this tile. */
- overlap: components["schemas"]["TBLR"];
- };
- /**
- * Tile to Properties
- * @description Split a Tile into its individual properties.
- */
- TileToPropertiesInvocation: {
+ key: string;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Hash
+ * @description The hash of the model file(s).
*/
- id: string;
+ hash: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- is_intermediate?: boolean;
+ path: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * File Size
+ * @description The size of the model in bytes.
*/
- use_cache?: boolean;
+ file_size: number;
/**
- * @description The tile to split into properties.
- * @default null
+ * Name
+ * @description Name of the model.
*/
- tile?: components["schemas"]["Tile"] | null;
+ name: string;
/**
- * type
- * @default tile_to_properties
- * @constant
+ * Description
+ * @description Model description
*/
- type: "tile_to_properties";
- };
- /** TileToPropertiesOutput */
- TileToPropertiesOutput: {
+ description: string | null;
/**
- * Coords Left
- * @description Left coordinate of the tile relative to its parent image.
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- coords_left: number;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * Coords Right
- * @description Right coordinate of the tile relative to its parent image.
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- coords_right: number;
+ source_api_response: string | null;
/**
- * Coords Top
- * @description Top coordinate of the tile relative to its parent image.
+ * Cover Image
+ * @description Url for image to preview model
*/
- coords_top: number;
+ cover_image: string | null;
/**
- * Coords Bottom
- * @description Bottom coordinate of the tile relative to its parent image.
+ * Config Path
+ * @description Path to the config for this model, if any.
*/
- coords_bottom: number;
+ config_path: string | null;
/**
- * Width
- * @description The width of the tile. Equal to coords_right - coords_left.
+ * Type
+ * @default vae
+ * @constant
*/
- width: number;
+ type: "vae";
/**
- * Height
- * @description The height of the tile. Equal to coords_bottom - coords_top.
+ * Format
+ * @default checkpoint
+ * @constant
*/
- height: number;
+ format: "checkpoint";
/**
- * Overlap Top
- * @description Overlap between this tile and its top neighbor.
- */
- overlap_top: number;
+ * Base
+ * @default sd-1
+ * @constant
+ */
+ base: "sd-1";
+ };
+ /** VAE_Checkpoint_SD2_Config */
+ VAE_Checkpoint_SD2_Config: {
/**
- * Overlap Bottom
- * @description Overlap between this tile and its bottom neighbor.
+ * Key
+ * @description A unique key for this model.
*/
- overlap_bottom: number;
+ key: string;
/**
- * Overlap Left
- * @description Overlap between this tile and its left neighbor.
+ * Hash
+ * @description The hash of the model file(s).
*/
- overlap_left: number;
+ hash: string;
/**
- * Overlap Right
- * @description Overlap between this tile and its right neighbor.
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- overlap_right: number;
+ path: string;
/**
- * type
- * @default tile_to_properties_output
- * @constant
+ * File Size
+ * @description The size of the model in bytes.
*/
- type: "tile_to_properties_output";
- };
- /** TileWithImage */
- TileWithImage: {
- tile: components["schemas"]["Tile"];
- image: components["schemas"]["ImageField"];
- };
- /**
- * Tiled Multi-Diffusion Denoise - SD1.5, SDXL
- * @description Tiled Multi-Diffusion denoising.
- *
- * This node handles automatically tiling the input image, and is primarily intended for global refinement of images
- * in tiled upscaling workflows. Future Multi-Diffusion nodes should allow the user to specify custom regions with
- * different parameters for each region to harness the full power of Multi-Diffusion.
- *
- * This node has a similar interface to the `DenoiseLatents` node, but it has a reduced feature set (no IP-Adapter,
- * T2I-Adapter, masking, etc.).
- */
- TiledMultiDiffusionDenoiseLatents: {
+ file_size: number;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Name
+ * @description Name of the model.
*/
- id: string;
+ name: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Description
+ * @description Model description
*/
- is_intermediate?: boolean;
+ description: string | null;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- use_cache?: boolean;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * @description Positive conditioning tensor
- * @default null
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- positive_conditioning?: components["schemas"]["ConditioningField"] | null;
+ source_api_response: string | null;
/**
- * @description Negative conditioning tensor
- * @default null
+ * Cover Image
+ * @description Url for image to preview model
*/
- negative_conditioning?: components["schemas"]["ConditioningField"] | null;
+ cover_image: string | null;
/**
- * @description Noise tensor
- * @default null
+ * Config Path
+ * @description Path to the config for this model, if any.
*/
- noise?: components["schemas"]["LatentsField"] | null;
+ config_path: string | null;
/**
- * @description Latents tensor
- * @default null
+ * Type
+ * @default vae
+ * @constant
*/
- latents?: components["schemas"]["LatentsField"] | null;
+ type: "vae";
/**
- * Tile Height
- * @description Height of the tiles in image space.
- * @default 1024
+ * Format
+ * @default checkpoint
+ * @constant
*/
- tile_height?: number;
+ format: "checkpoint";
/**
- * Tile Width
- * @description Width of the tiles in image space.
- * @default 1024
+ * Base
+ * @default sd-2
+ * @constant
*/
- tile_width?: number;
+ base: "sd-2";
+ };
+ /** VAE_Checkpoint_SDXL_Config */
+ VAE_Checkpoint_SDXL_Config: {
/**
- * Tile Overlap
- * @description The overlap between adjacent tiles in pixel space. (Of course, tile merging is applied in latent space.) Tiles will be cropped during merging (if necessary) to ensure that they overlap by exactly this amount.
- * @default 32
+ * Key
+ * @description A unique key for this model.
*/
- tile_overlap?: number;
+ key: string;
/**
- * Steps
- * @description Number of steps to run
- * @default 18
+ * Hash
+ * @description The hash of the model file(s).
*/
- steps?: number;
+ hash: string;
/**
- * CFG Scale
- * @description Classifier-Free Guidance scale
- * @default 6
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- cfg_scale?: number | number[];
+ path: string;
/**
- * Denoising Start
- * @description When to start denoising, expressed a percentage of total steps
- * @default 0
+ * File Size
+ * @description The size of the model in bytes.
*/
- denoising_start?: number;
+ file_size: number;
/**
- * Denoising End
- * @description When to stop denoising, expressed a percentage of total steps
- * @default 1
+ * Name
+ * @description Name of the model.
*/
- denoising_end?: number;
+ name: string;
/**
- * Scheduler
- * @description Scheduler to use during inference
- * @default euler
- * @enum {string}
+ * Description
+ * @description Model description
*/
- scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd";
+ description: string | null;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
- * @default null
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- unet?: components["schemas"]["UNetField"] | null;
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * CFG Rescale Multiplier
- * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR
- * @default 0
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- cfg_rescale_multiplier?: number;
+ source_api_response: string | null;
/**
- * Control
- * @default null
+ * Cover Image
+ * @description Url for image to preview model
*/
- control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null;
+ cover_image: string | null;
/**
- * type
- * @default tiled_multi_diffusion_denoise_latents
+ * Config Path
+ * @description Path to the config for this model, if any.
+ */
+ config_path: string | null;
+ /**
+ * Type
+ * @default vae
* @constant
*/
- type: "tiled_multi_diffusion_denoise_latents";
- };
- /** TransformerField */
- TransformerField: {
- /** @description Info to load Transformer submodel */
- transformer: components["schemas"]["ModelIdentifierField"];
+ type: "vae";
/**
- * Loras
- * @description LoRAs to apply on model loading
+ * Format
+ * @default checkpoint
+ * @constant
*/
- loras: components["schemas"]["LoRAField"][];
- };
- /**
- * UIComponent
- * @description The type of UI component to use for a field, used to override the default components, which are
- * inferred from the field type.
- * @enum {string}
- */
- UIComponent: "none" | "textarea" | "slider";
- /**
- * UIConfigBase
- * @description Provides additional node configuration to the UI.
- * This is used internally by the @invocation decorator logic. Do not use this directly.
- */
- UIConfigBase: {
+ format: "checkpoint";
/**
- * Tags
- * @description The node's tags
- * @default null
+ * Base
+ * @default sdxl
+ * @constant
*/
- tags: string[] | null;
+ base: "sdxl";
+ };
+ /** VAE_Diffusers_SD1_Config */
+ VAE_Diffusers_SD1_Config: {
/**
- * Title
- * @description The node's display name
- * @default null
+ * Key
+ * @description A unique key for this model.
*/
- title: string | null;
+ key: string;
/**
- * Category
- * @description The node's category
- * @default null
+ * Hash
+ * @description The hash of the model file(s).
*/
- category: string | null;
+ hash: string;
/**
- * Version
- * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
- version: string;
+ path: string;
/**
- * Node Pack
- * @description The node pack that this node belongs to, will be 'invokeai' for built-in nodes
+ * File Size
+ * @description The size of the model in bytes.
*/
- node_pack: string;
+ file_size: number;
/**
- * @description The node's classification
- * @default stable
+ * Name
+ * @description Name of the model.
*/
- classification: components["schemas"]["Classification"];
- };
- /**
- * UIType
- * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type.
- *
- * - Model Fields
- * The most common node-author-facing use will be for model fields. Internally, there is no difference
- * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the
- * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that
- * the field is an SDXL main model field.
- *
- * - Any Field
- * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to
- * indicate that the field accepts any type. Use with caution. This cannot be used on outputs.
- *
- * - Scheduler Field
- * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field.
- *
- * - Internal Fields
- * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate
- * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These
- * should not be used by node authors.
- *
- * - DEPRECATED Fields
- * These types are deprecated and should not be used by node authors. A warning will be logged if one is
- * used, and the type will be ignored. They are included here for backwards compatibility.
- * @enum {string}
- */
- UIType: "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "IsIntermediate" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict" | "DEPRECATED_MainModelField" | "DEPRECATED_CogView4MainModelField" | "DEPRECATED_FluxMainModelField" | "DEPRECATED_SD3MainModelField" | "DEPRECATED_SDXLMainModelField" | "DEPRECATED_SDXLRefinerModelField" | "DEPRECATED_ONNXModelField" | "DEPRECATED_VAEModelField" | "DEPRECATED_FluxVAEModelField" | "DEPRECATED_LoRAModelField" | "DEPRECATED_ControlNetModelField" | "DEPRECATED_IPAdapterModelField" | "DEPRECATED_T2IAdapterModelField" | "DEPRECATED_T5EncoderModelField" | "DEPRECATED_CLIPEmbedModelField" | "DEPRECATED_CLIPLEmbedModelField" | "DEPRECATED_CLIPGEmbedModelField" | "DEPRECATED_SpandrelImageToImageModelField" | "DEPRECATED_ControlLoRAModelField" | "DEPRECATED_SigLipModelField" | "DEPRECATED_FluxReduxModelField" | "DEPRECATED_LLaVAModelField" | 
"DEPRECATED_Imagen3ModelField" | "DEPRECATED_Imagen4ModelField" | "DEPRECATED_ChatGPT4oModelField" | "DEPRECATED_Gemini2_5ModelField" | "DEPRECATED_FluxKontextModelField" | "DEPRECATED_Veo3ModelField" | "DEPRECATED_RunwayModelField";
- /** UNetField */
- UNetField: {
- /** @description Info to load unet submodel */
- unet: components["schemas"]["ModelIdentifierField"];
- /** @description Info to load scheduler submodel */
- scheduler: components["schemas"]["ModelIdentifierField"];
+ name: string;
/**
- * Loras
- * @description LoRAs to apply on model loading
+ * Description
+ * @description Model description
*/
- loras: components["schemas"]["LoRAField"][];
+ description: string | null;
/**
- * Seamless Axes
- * @description Axes("x" and "y") to which apply seamless
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
*/
- seamless_axes?: string[];
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
/**
- * @description FreeU configuration
- * @default null
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
*/
- freeu_config?: components["schemas"]["FreeUConfig"] | null;
- };
- /**
- * UNetOutput
- * @description Base class for invocations that output a UNet field.
- */
- UNetOutput: {
+ source_api_response: string | null;
/**
- * UNet
- * @description UNet (scheduler, LoRAs)
+ * Cover Image
+ * @description Url for image to preview model
*/
- unet: components["schemas"]["UNetField"];
+ cover_image: string | null;
/**
- * type
- * @default unet_output
+ * Format
+ * @default diffusers
* @constant
*/
- type: "unet_output";
- };
- /**
- * URLModelSource
- * @description A generic URL point to a checkpoint file.
- */
- URLModelSource: {
- /**
- * Url
- * Format: uri
- */
- url: string;
- /** Access Token */
- access_token?: string | null;
- /**
- * @description discriminator enum property added by openapi-typescript
- * @enum {string}
- */
- type: "url";
- };
- /** URLRegexTokenPair */
- URLRegexTokenPair: {
+ format: "diffusers";
+ /** @default */
+ repo_variant: components["schemas"]["ModelRepoVariant"];
/**
- * Url Regex
- * @description Regular expression to match against the URL
+ * Type
+ * @default vae
+ * @constant
*/
- url_regex: string;
+ type: "vae";
/**
- * Token
- * @description Token to use when the URL matches the regex
+ * Base
+ * @default sd-1
+ * @constant
*/
- token: string;
+ base: "sd-1";
};
- /**
- * Unknown_Config
- * @description Model config for unknown models, used as a fallback when we cannot positively identify a model.
- */
- Unknown_Config: {
+ /** VAE_Diffusers_SDXL_Config */
+ VAE_Diffusers_SDXL_Config: {
/**
* Key
* @description A unique key for this model.
@@ -24596,1783 +24053,346 @@ export type components = {
*/
cover_image: string | null;
/**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Base
- * @default unknown
+ * Format
+ * @default diffusers
* @constant
*/
- base: "unknown";
+ format: "diffusers";
+ /** @default */
+ repo_variant: components["schemas"]["ModelRepoVariant"];
/**
* Type
- * @default unknown
+ * @default vae
* @constant
*/
- type: "unknown";
+ type: "vae";
/**
- * Format
- * @default unknown
+ * Base
+ * @default sdxl
* @constant
*/
- format: "unknown";
+ base: "sdxl";
};
- /**
- * Unsharp Mask
- * @description Applies an unsharp mask filter to an image
- */
- UnsharpMaskInvocation: {
- /**
- * @description The board to save the image to
- * @default null
- */
- board?: components["schemas"]["BoardField"] | null;
+ /** ValidationError */
+ ValidationError: {
+ /** Location */
+ loc: (string | number)[];
+ /** Message */
+ msg: string;
+ /** Error Type */
+ type: string;
+ };
+ /** Workflow */
+ Workflow: {
/**
- * @description Optional metadata to be saved with the image
- * @default null
+ * Name
+ * @description The name of the workflow.
*/
- metadata?: components["schemas"]["MetadataField"] | null;
+ name: string;
/**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ * Author
+ * @description The author of the workflow.
*/
- id: string;
+ author: string;
/**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
+ * Description
+ * @description The description of the workflow.
*/
- is_intermediate?: boolean;
+ description: string;
/**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
+ * Version
+ * @description The version of the workflow.
*/
- use_cache?: boolean;
+ version: string;
/**
- * @description The image to use
- * @default null
+ * Contact
+ * @description The contact of the workflow.
*/
- image?: components["schemas"]["ImageField"] | null;
+ contact: string;
/**
- * Radius
- * @description Unsharp mask radius
- * @default 2
+ * Tags
+ * @description The tags of the workflow.
*/
- radius?: number;
+ tags: string;
/**
- * Strength
- * @description Unsharp mask strength
- * @default 50
+ * Notes
+ * @description The notes of the workflow.
*/
- strength?: number;
+ notes: string;
/**
- * type
- * @default unsharp_mask
- * @constant
+ * Exposedfields
+ * @description The exposed fields of the workflow.
*/
- type: "unsharp_mask";
- };
- /** UnstarredImagesResult */
- UnstarredImagesResult: {
+ exposedFields: components["schemas"]["ExposedField"][];
+ /** @description The meta of the workflow. */
+ meta: components["schemas"]["WorkflowMeta"];
/**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
+ * Nodes
+ * @description The nodes of the workflow.
*/
- affected_boards: string[];
+ nodes: {
+ [key: string]: components["schemas"]["JsonValue"];
+ }[];
/**
- * Unstarred Images
- * @description The names of the images that were unstarred
+ * Edges
+ * @description The edges of the workflow.
*/
- unstarred_images: string[];
- };
- /** UnstarredVideosResult */
- UnstarredVideosResult: {
+ edges: {
+ [key: string]: components["schemas"]["JsonValue"];
+ }[];
/**
- * Affected Boards
- * @description The ids of boards affected by the delete operation
+ * Form
+ * @description The form of the workflow.
*/
- affected_boards: string[];
+ form?: {
+ [key: string]: components["schemas"]["JsonValue"];
+ } | null;
/**
- * Unstarred Videos
- * @description The ids of the videos that were unstarred
+ * Id
+ * @description The id of the workflow.
*/
- unstarred_videos: string[];
+ id: string;
};
- /** Upscaler */
- Upscaler: {
+ /** WorkflowAndGraphResponse */
+ WorkflowAndGraphResponse: {
/**
- * Upscaling Method
- * @description Name of upscaling method
+ * Workflow
+ * @description The workflow used to generate the image, as stringified JSON
*/
- upscaling_method: string;
+ workflow: string | null;
/**
- * Upscaling Models
- * @description List of upscaling models for this method
- */
- upscaling_models: string[];
- };
- /** VAEField */
- VAEField: {
- /** @description Info to load vae submodel */
- vae: components["schemas"]["ModelIdentifierField"];
- /**
- * Seamless Axes
- * @description Axes("x" and "y") to which apply seamless
+ * Graph
+ * @description The graph used to generate the image, as stringified JSON
*/
- seamless_axes?: string[];
+ graph: string | null;
};
/**
- * VAE Model - SD1.5, SD2, SDXL, SD3, FLUX
- * @description Loads a VAE model, outputting a VaeLoaderOutput
+ * WorkflowCategory
+ * @enum {string}
*/
- VAELoaderInvocation: {
- /**
- * Id
- * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
- */
- id: string;
- /**
- * Is Intermediate
- * @description Whether or not this is an intermediate invocation.
- * @default false
- */
- is_intermediate?: boolean;
- /**
- * Use Cache
- * @description Whether or not to use the cache
- * @default true
- */
- use_cache?: boolean;
- /**
- * VAE
- * @description VAE model to load
- * @default null
- */
- vae_model?: components["schemas"]["ModelIdentifierField"] | null;
+ WorkflowCategory: "user" | "default";
+ /** WorkflowMeta */
+ WorkflowMeta: {
/**
- * type
- * @default vae_loader
- * @constant
+ * Version
+ * @description The version of the workflow schema.
*/
- type: "vae_loader";
+ version: string;
+ /** @description The category of the workflow (user or default). */
+ category: components["schemas"]["WorkflowCategory"];
};
- /**
- * VAEOutput
- * @description Base class for invocations that output a VAE field
- */
- VAEOutput: {
+ /** WorkflowRecordDTO */
+ WorkflowRecordDTO: {
/**
- * VAE
- * @description VAE
+ * Workflow Id
+ * @description The id of the workflow.
*/
- vae: components["schemas"]["VAEField"];
+ workflow_id: string;
/**
- * type
- * @default vae_output
- * @constant
+ * Name
+ * @description The name of the workflow.
*/
- type: "vae_output";
- };
- /** VAE_Checkpoint_FLUX_Config */
- VAE_Checkpoint_FLUX_Config: {
+ name: string;
/**
- * Key
- * @description A unique key for this model.
+ * Created At
+ * @description The created timestamp of the workflow.
*/
- key: string;
+ created_at: string;
/**
- * Hash
- * @description The hash of the model file(s).
+ * Updated At
+ * @description The updated timestamp of the workflow.
*/
- hash: string;
+ updated_at: string;
/**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ * Opened At
+ * @description The opened timestamp of the workflow.
*/
- path: string;
+ opened_at?: string | null;
+ /** @description The workflow. */
+ workflow: components["schemas"]["Workflow"];
+ };
+ /** WorkflowRecordListItemWithThumbnailDTO */
+ WorkflowRecordListItemWithThumbnailDTO: {
/**
- * File Size
- * @description The size of the model in bytes.
+ * Workflow Id
+ * @description The id of the workflow.
*/
- file_size: number;
+ workflow_id: string;
/**
* Name
- * @description Name of the model.
+ * @description The name of the workflow.
*/
name: string;
/**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
+ * Created At
+ * @description The created timestamp of the workflow.
*/
- cover_image: string | null;
+ created_at: string;
/**
- * Usage Info
- * @description Usage information for this model
+ * Updated At
+ * @description The updated timestamp of the workflow.
*/
- usage_info: string | null;
+ updated_at: string;
/**
- * Config Path
- * @description Path to the config for this model, if any.
+ * Opened At
+ * @description The opened timestamp of the workflow.
*/
- config_path: string | null;
+ opened_at?: string | null;
/**
- * Type
- * @default vae
- * @constant
+ * Description
+ * @description The description of the workflow.
*/
- type: "vae";
+ description: string;
+ /** @description The description of the workflow. */
+ category: components["schemas"]["WorkflowCategory"];
/**
- * Format
- * @default checkpoint
- * @constant
+ * Tags
+ * @description The tags of the workflow.
*/
- format: "checkpoint";
+ tags: string;
/**
- * Base
- * @default flux
- * @constant
+ * Thumbnail Url
+ * @description The URL of the workflow thumbnail.
*/
- base: "flux";
+ thumbnail_url?: string | null;
};
- /** VAE_Checkpoint_SD1_Config */
- VAE_Checkpoint_SD1_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
- /**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
+ /**
+ * WorkflowRecordOrderBy
+ * @description The order by options for workflow records
+ * @enum {string}
+ */
+ WorkflowRecordOrderBy: "created_at" | "updated_at" | "opened_at" | "name";
+ /** WorkflowRecordWithThumbnailDTO */
+ WorkflowRecordWithThumbnailDTO: {
/**
- * File Size
- * @description The size of the model in bytes.
+ * Workflow Id
+ * @description The id of the workflow.
*/
- file_size: number;
+ workflow_id: string;
/**
* Name
- * @description Name of the model.
+ * @description The name of the workflow.
*/
name: string;
/**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Config Path
- * @description Path to the config for this model, if any.
- */
- config_path: string | null;
- /**
- * Type
- * @default vae
- * @constant
- */
- type: "vae";
- /**
- * Format
- * @default checkpoint
- * @constant
+ * Created At
+ * @description The created timestamp of the workflow.
*/
- format: "checkpoint";
+ created_at: string;
/**
- * Base
- * @default sd-1
- * @constant
+ * Updated At
+ * @description The updated timestamp of the workflow.
*/
- base: "sd-1";
- };
- /** VAE_Checkpoint_SD2_Config */
- VAE_Checkpoint_SD2_Config: {
+ updated_at: string;
/**
- * Key
- * @description A unique key for this model.
+ * Opened At
+ * @description The opened timestamp of the workflow.
*/
- key: string;
+ opened_at?: string | null;
+ /** @description The workflow. */
+ workflow: components["schemas"]["Workflow"];
/**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
- /**
- * Name
- * @description Name of the model.
- */
- name: string;
- /**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Config Path
- * @description Path to the config for this model, if any.
- */
- config_path: string | null;
- /**
- * Type
- * @default vae
- * @constant
- */
- type: "vae";
- /**
- * Format
- * @default checkpoint
- * @constant
- */
- format: "checkpoint";
- /**
- * Base
- * @default sd-2
- * @constant
- */
- base: "sd-2";
- };
- /** VAE_Checkpoint_SDXL_Config */
- VAE_Checkpoint_SDXL_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
- /**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
- /**
- * Name
- * @description Name of the model.
- */
- name: string;
- /**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Config Path
- * @description Path to the config for this model, if any.
- */
- config_path: string | null;
- /**
- * Type
- * @default vae
- * @constant
- */
- type: "vae";
- /**
- * Format
- * @default checkpoint
- * @constant
- */
- format: "checkpoint";
- /**
- * Base
- * @default sdxl
- * @constant
- */
- base: "sdxl";
- };
- /** VAE_Diffusers_SD1_Config */
- VAE_Diffusers_SD1_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
- /**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
- /**
- * Name
- * @description Name of the model.
- */
- name: string;
- /**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Format
- * @default diffusers
- * @constant
- */
- format: "diffusers";
- /** @default */
- repo_variant: components["schemas"]["ModelRepoVariant"];
- /**
- * Type
- * @default vae
- * @constant
- */
- type: "vae";
- /**
- * Base
- * @default sd-1
- * @constant
- */
- base: "sd-1";
- };
- /** VAE_Diffusers_SDXL_Config */
- VAE_Diffusers_SDXL_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
- /**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
- /**
- * Name
- * @description Name of the model.
- */
- name: string;
- /**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Format
- * @default diffusers
- * @constant
- */
- format: "diffusers";
- /** @default */
- repo_variant: components["schemas"]["ModelRepoVariant"];
- /**
- * Type
- * @default vae
- * @constant
- */
- type: "vae";
- /**
- * Base
- * @default sdxl
- * @constant
- */
- base: "sdxl";
- };
- /** ValidationError */
- ValidationError: {
- /** Location */
- loc: (string | number)[];
- /** Message */
- msg: string;
- /** Error Type */
- type: string;
- };
- /** ValidationRunData */
- ValidationRunData: {
- /**
- * Workflow Id
- * @description The id of the workflow being published.
- */
- workflow_id: string;
- /**
- * Input Fields
- * @description The input fields for the published workflow
- */
- input_fields: components["schemas"]["FieldIdentifier"][];
- /**
- * Output Fields
- * @description The output fields for the published workflow
- */
- output_fields: components["schemas"]["FieldIdentifier"][];
- };
- /**
- * VideoDTO
- * @description Deserialized video record, enriched for the frontend.
- */
- VideoDTO: {
- /**
- * Video Id
- * @description The unique id of the video.
- */
- video_id: string;
- /**
- * Video Url
- * @description The URL of the video.
- */
- video_url: string;
- /**
- * Thumbnail Url
- * @description The URL of the video's thumbnail.
- */
- thumbnail_url: string;
- /**
- * Width
- * @description The width of the video in px.
- */
- width: number;
- /**
- * Height
- * @description The height of the video in px.
- */
- height: number;
- /**
- * Created At
- * @description The created timestamp of the video.
- */
- created_at: string;
- /**
- * Updated At
- * @description The updated timestamp of the video.
- */
- updated_at: string;
- /**
- * Deleted At
- * @description The deleted timestamp of the video.
- */
- deleted_at?: string | null;
- /**
- * Is Intermediate
- * @description Whether this is an intermediate video.
- */
- is_intermediate: boolean;
- /**
- * Session Id
- * @description The session ID that generated this video, if it is a generated video.
- */
- session_id?: string | null;
- /**
- * Node Id
- * @description The node ID that generated this video, if it is a generated video.
- */
- node_id?: string | null;
- /**
- * Starred
- * @description Whether this video is starred.
- */
- starred: boolean;
- /**
- * Board Id
- * @description The id of the board the image belongs to, if one exists.
- */
- board_id?: string | null;
- };
- /**
- * VideoField
- * @description A video primitive field
- */
- VideoField: {
- /**
- * Video Id
- * @description The id of the video
- */
- video_id: string;
- };
- /**
- * VideoIdsResult
- * @description Response containing ordered video ids with metadata for optimistic updates.
- */
- VideoIdsResult: {
- /**
- * Video Ids
- * @description Ordered list of video ids
- */
- video_ids: string[];
- /**
- * Starred Count
- * @description Number of starred videos (when starred_first=True)
- */
- starred_count: number;
- /**
- * Total Count
- * @description Total number of videos matching the query
- */
- total_count: number;
- };
- /**
- * VideoOutput
- * @description Base class for nodes that output a video
- */
- VideoOutput: {
- /** @description The output video */
- video: components["schemas"]["VideoField"];
- /**
- * Width
- * @description The width of the video in pixels
- */
- width: number;
- /**
- * Height
- * @description The height of the video in pixels
- */
- height: number;
- /**
- * Duration Seconds
- * @description The duration of the video in seconds
- */
- duration_seconds: number;
- /**
- * type
- * @default video_output
- * @constant
- */
- type: "video_output";
- };
- /**
- * VideoRecordChanges
- * @description A set of changes to apply to a video record.
- *
- * Only limited changes are valid:
- * - `session_id`: change the session associated with a video
- * - `is_intermediate`: change the video's `is_intermediate` flag
- * - `starred`: change whether the video is starred
- */
- VideoRecordChanges: {
- /**
- * Session Id
- * @description The video's new session ID.
- */
- session_id?: string | null;
- /**
- * Is Intermediate
- * @description The video's new `is_intermediate` flag.
- */
- is_intermediate?: boolean | null;
- /**
- * Starred
- * @description The video's new `starred` state
- */
- starred?: boolean | null;
- };
- /** Video_ExternalAPI_Runway_Config */
- Video_ExternalAPI_Runway_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
- /**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
- /**
- * Name
- * @description Name of the model.
- */
- name: string;
- /**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Type
- * @default video
- * @constant
- */
- type: "video";
- /**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
- */
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
- /**
- * Format
- * @default api
- * @constant
- */
- format: "api";
- /**
- * Base
- * @default runway
- * @constant
- */
- base: "runway";
- };
- /** Video_ExternalAPI_Veo3_Config */
- Video_ExternalAPI_Veo3_Config: {
- /**
- * Key
- * @description A unique key for this model.
- */
- key: string;
- /**
- * Hash
- * @description The hash of the model file(s).
- */
- hash: string;
- /**
- * Path
- * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
- */
- path: string;
- /**
- * File Size
- * @description The size of the model in bytes.
- */
- file_size: number;
- /**
- * Name
- * @description Name of the model.
- */
- name: string;
- /**
- * Description
- * @description Model description
- */
- description: string | null;
- /**
- * Source
- * @description The original source of the model (path, URL or repo_id).
- */
- source: string;
- /** @description The type of source */
- source_type: components["schemas"]["ModelSourceType"];
- /**
- * Source Api Response
- * @description The original API response from the source, as stringified JSON.
- */
- source_api_response: string | null;
- /**
- * Cover Image
- * @description Url for image to preview model
- */
- cover_image: string | null;
- /**
- * Usage Info
- * @description Usage information for this model
- */
- usage_info: string | null;
- /**
- * Type
- * @default video
- * @constant
- */
- type: "video";
- /**
- * Trigger Phrases
- * @description Set of trigger phrases for this model
- */
- trigger_phrases: string[] | null;
- /** @description Default settings for this model */
- default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
- /**
- * Format
- * @default api
- * @constant
- */
- format: "api";
- /**
- * Base
- * @default veo3
- * @constant
- */
- base: "veo3";
- };
- /** Workflow */
- Workflow: {
- /**
- * Name
- * @description The name of the workflow.
- */
- name: string;
- /**
- * Author
- * @description The author of the workflow.
- */
- author: string;
- /**
- * Description
- * @description The description of the workflow.
- */
- description: string;
- /**
- * Version
- * @description The version of the workflow.
- */
- version: string;
- /**
- * Contact
- * @description The contact of the workflow.
- */
- contact: string;
- /**
- * Tags
- * @description The tags of the workflow.
- */
- tags: string;
- /**
- * Notes
- * @description The notes of the workflow.
- */
- notes: string;
- /**
- * Exposedfields
- * @description The exposed fields of the workflow.
- */
- exposedFields: components["schemas"]["ExposedField"][];
- /** @description The meta of the workflow. */
- meta: components["schemas"]["WorkflowMeta"];
- /**
- * Nodes
- * @description The nodes of the workflow.
- */
- nodes: {
- [key: string]: components["schemas"]["JsonValue"];
- }[];
- /**
- * Edges
- * @description The edges of the workflow.
- */
- edges: {
- [key: string]: components["schemas"]["JsonValue"];
- }[];
- /**
- * Form
- * @description The form of the workflow.
- */
- form?: {
- [key: string]: components["schemas"]["JsonValue"];
- } | null;
- /**
- * Is Published
- * @description Whether the workflow is published or not.
- */
- is_published?: boolean | null;
- /**
- * Id
- * @description The id of the workflow.
- */
- id: string;
- };
- /** WorkflowAndGraphResponse */
- WorkflowAndGraphResponse: {
- /**
- * Workflow
- * @description The workflow used to generate the image, as stringified JSON
- */
- workflow: string | null;
- /**
- * Graph
- * @description The graph used to generate the image, as stringified JSON
- */
- graph: string | null;
- };
- /**
- * WorkflowCategory
- * @enum {string}
- */
- WorkflowCategory: "user" | "default" | "project";
- /** WorkflowMeta */
- WorkflowMeta: {
- /**
- * Version
- * @description The version of the workflow schema.
- */
- version: string;
- /** @description The category of the workflow (user or default). */
- category: components["schemas"]["WorkflowCategory"];
- };
- /** WorkflowRecordDTO */
- WorkflowRecordDTO: {
- /**
- * Workflow Id
- * @description The id of the workflow.
- */
- workflow_id: string;
- /**
- * Name
- * @description The name of the workflow.
- */
- name: string;
- /**
- * Created At
- * @description The created timestamp of the workflow.
- */
- created_at: string;
- /**
- * Updated At
- * @description The updated timestamp of the workflow.
- */
- updated_at: string;
- /**
- * Opened At
- * @description The opened timestamp of the workflow.
- */
- opened_at?: string | null;
- /**
- * Is Published
- * @description Whether the workflow is published or not.
- */
- is_published?: boolean | null;
- /** @description The workflow. */
- workflow: components["schemas"]["Workflow"];
- };
- /** WorkflowRecordListItemWithThumbnailDTO */
- WorkflowRecordListItemWithThumbnailDTO: {
- /**
- * Workflow Id
- * @description The id of the workflow.
- */
- workflow_id: string;
- /**
- * Name
- * @description The name of the workflow.
- */
- name: string;
- /**
- * Created At
- * @description The created timestamp of the workflow.
- */
- created_at: string;
- /**
- * Updated At
- * @description The updated timestamp of the workflow.
- */
- updated_at: string;
- /**
- * Opened At
- * @description The opened timestamp of the workflow.
- */
- opened_at?: string | null;
- /**
- * Is Published
- * @description Whether the workflow is published or not.
- */
- is_published?: boolean | null;
- /**
- * Description
- * @description The description of the workflow.
- */
- description: string;
- /** @description The description of the workflow. */
- category: components["schemas"]["WorkflowCategory"];
- /**
- * Tags
- * @description The tags of the workflow.
- */
- tags: string;
- /**
- * Thumbnail Url
- * @description The URL of the workflow thumbnail.
- */
- thumbnail_url?: string | null;
- };
- /**
- * WorkflowRecordOrderBy
- * @description The order by options for workflow records
- * @enum {string}
- */
- WorkflowRecordOrderBy: "created_at" | "updated_at" | "opened_at" | "name";
- /** WorkflowRecordWithThumbnailDTO */
- WorkflowRecordWithThumbnailDTO: {
- /**
- * Workflow Id
- * @description The id of the workflow.
- */
- workflow_id: string;
- /**
- * Name
- * @description The name of the workflow.
- */
- name: string;
- /**
- * Created At
- * @description The created timestamp of the workflow.
- */
- created_at: string;
- /**
- * Updated At
- * @description The updated timestamp of the workflow.
- */
- updated_at: string;
- /**
- * Opened At
- * @description The opened timestamp of the workflow.
- */
- opened_at?: string | null;
- /**
- * Is Published
- * @description Whether the workflow is published or not.
- */
- is_published?: boolean | null;
- /** @description The workflow. */
- workflow: components["schemas"]["Workflow"];
- /**
- * Thumbnail Url
- * @description The URL of the workflow thumbnail.
- */
- thumbnail_url?: string | null;
- };
- /** WorkflowWithoutID */
- WorkflowWithoutID: {
- /**
- * Name
- * @description The name of the workflow.
- */
- name: string;
- /**
- * Author
- * @description The author of the workflow.
- */
- author: string;
- /**
- * Description
- * @description The description of the workflow.
- */
- description: string;
- /**
- * Version
- * @description The version of the workflow.
- */
- version: string;
- /**
- * Contact
- * @description The contact of the workflow.
- */
- contact: string;
- /**
- * Tags
- * @description The tags of the workflow.
- */
- tags: string;
- /**
- * Notes
- * @description The notes of the workflow.
- */
- notes: string;
- /**
- * Exposedfields
- * @description The exposed fields of the workflow.
- */
- exposedFields: components["schemas"]["ExposedField"][];
- /** @description The meta of the workflow. */
- meta: components["schemas"]["WorkflowMeta"];
- /**
- * Nodes
- * @description The nodes of the workflow.
- */
- nodes: {
- [key: string]: components["schemas"]["JsonValue"];
- }[];
- /**
- * Edges
- * @description The edges of the workflow.
- */
- edges: {
- [key: string]: components["schemas"]["JsonValue"];
- }[];
- /**
- * Form
- * @description The form of the workflow.
- */
- form?: {
- [key: string]: components["schemas"]["JsonValue"];
- } | null;
- /**
- * Is Published
- * @description Whether the workflow is published or not.
- */
- is_published?: boolean | null;
- };
- };
- responses: never;
- parameters: never;
- requestBodies: never;
- headers: never;
- pathItems: never;
-};
-export type $defs = Record;
-export interface operations {
- parse_dynamicprompts: {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_parse_dynamicprompts"];
- };
- };
- responses: {
- /** @description Successful Response */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["DynamicPromptsResponse"];
- };
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- list_model_records: {
- parameters: {
- query?: {
- /** @description Base models to include */
- base_models?: components["schemas"]["BaseModelType"][] | null;
- /** @description The type of model to get */
- model_type?: components["schemas"]["ModelType"] | null;
- /** @description Exact match on the name of the model */
- model_name?: string | null;
- /** @description Exact match on the format of the model (e.g. 'diffusers') */
- model_format?: components["schemas"]["ModelFormat"] | null;
- };
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Successful Response */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["ModelsList"];
- };
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- get_model_records_by_attrs: {
- parameters: {
- query: {
- /** @description The name of the model */
- name: string;
- /** @description The type of the model */
- type: components["schemas"]["ModelType"];
- /** @description The base model of the model */
- base: components["schemas"]["BaseModelType"];
- };
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Successful Response */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
- };
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- get_model_record: {
- parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description Key of the model record to fetch. */
- key: string;
- };
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description The model configuration was retrieved successfully */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- /** @example {
- * "path": "string",
- * "name": "string",
- * "base": "sd-1",
- * "type": "main",
- * "format": "checkpoint",
- * "config_path": "string",
- * "key": "string",
- * "hash": "string",
- * "file_size": 1,
- * "description": "string",
- * "source": "string",
- * "converted_at": 0,
- * "variant": "normal",
- * "prediction_type": "epsilon",
- * "repo_variant": "fp16",
- * "upcast_attention": false
- * } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
- };
- };
- /** @description Bad request */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description The model could not be found */
- 404: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- delete_model: {
- parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description Unique key of model to remove from model registry. */
- key: string;
- };
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Model deleted successfully */
- 204: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Model not found */
- 404: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- update_model_record: {
- parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description Unique key of model */
- key: string;
- };
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["ModelRecordChanges"];
- };
- };
- responses: {
- /** @description The model was updated successfully */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- /** @example {
- * "path": "string",
- * "name": "string",
- * "base": "sd-1",
- * "type": "main",
- * "format": "checkpoint",
- * "config_path": "string",
- * "key": "string",
- * "hash": "string",
- * "file_size": 1,
- * "description": "string",
- * "source": "string",
- * "converted_at": 0,
- * "variant": "normal",
- * "prediction_type": "epsilon",
- * "repo_variant": "fp16",
- * "upcast_attention": false
- * } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
- };
- };
- /** @description Bad request */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description The model could not be found */
- 404: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description There is already a model corresponding to the new name */
- 409: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- scan_for_models: {
- parameters: {
- query?: {
- /** @description Directory path to search for models */
- scan_path?: string;
- };
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Directory scanned successfully */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["FoundModel"][];
- };
- };
- /** @description Invalid directory path */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- get_hugging_face_models: {
- parameters: {
- query?: {
- /** @description Hugging face repo to search for models */
- hugging_face_repo?: string;
- };
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Hugging Face repo scanned successfully */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HuggingFaceModels"];
- };
- };
- /** @description Invalid hugging face repo */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- get_model_image: {
- parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description The name of model image file to get */
- key: string;
- };
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description The model image was fetched successfully */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": unknown;
- };
- };
- /** @description Bad request */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description The model image could not be found */
- 404: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- delete_model_image: {
- parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description Unique key of model image to remove from model_images directory. */
- key: string;
- };
- cookie?: never;
+ * Thumbnail Url
+ * @description The URL of the workflow thumbnail.
+ */
+ thumbnail_url?: string | null;
};
- requestBody?: never;
- responses: {
- /** @description Model image deleted successfully */
- 204: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Model image not found */
- 404: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
+ /** WorkflowWithoutID */
+ WorkflowWithoutID: {
+ /**
+ * Name
+ * @description The name of the workflow.
+ */
+ name: string;
+ /**
+ * Author
+ * @description The author of the workflow.
+ */
+ author: string;
+ /**
+ * Description
+ * @description The description of the workflow.
+ */
+ description: string;
+ /**
+ * Version
+ * @description The version of the workflow.
+ */
+ version: string;
+ /**
+ * Contact
+ * @description The contact of the workflow.
+ */
+ contact: string;
+ /**
+ * Tags
+ * @description The tags of the workflow.
+ */
+ tags: string;
+ /**
+ * Notes
+ * @description The notes of the workflow.
+ */
+ notes: string;
+ /**
+ * Exposedfields
+ * @description The exposed fields of the workflow.
+ */
+ exposedFields: components["schemas"]["ExposedField"][];
+ /** @description The meta of the workflow. */
+ meta: components["schemas"]["WorkflowMeta"];
+ /**
+ * Nodes
+ * @description The nodes of the workflow.
+ */
+ nodes: {
+ [key: string]: components["schemas"]["JsonValue"];
+ }[];
+ /**
+ * Edges
+ * @description The edges of the workflow.
+ */
+ edges: {
+ [key: string]: components["schemas"]["JsonValue"];
+ }[];
+ /**
+ * Form
+ * @description The form of the workflow.
+ */
+ form?: {
+ [key: string]: components["schemas"]["JsonValue"];
+ } | null;
};
};
- update_model_image: {
+ responses: never;
+ parameters: never;
+ requestBodies: never;
+ headers: never;
+ pathItems: never;
+};
+export type $defs = Record;
+export interface operations {
+ parse_dynamicprompts: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description Unique key of model */
- key: string;
- };
+ path?: never;
cookie?: never;
};
requestBody: {
content: {
- "multipart/form-data": components["schemas"]["Body_update_model_image"];
+ "application/json": components["schemas"]["Body_parse_dynamicprompts"];
};
};
responses: {
- /** @description The model image was updated successfully */
+ /** @description Successful Response */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": unknown;
- };
- };
- /** @description Bad request */
- 400: {
- headers: {
- [name: string]: unknown;
+ "application/json": components["schemas"]["DynamicPromptsResponse"];
};
- content?: never;
};
/** @description Validation Error */
422: {
@@ -26385,9 +24405,18 @@ export interface operations {
};
};
};
- list_model_installs: {
+ list_model_records: {
parameters: {
- query?: never;
+ query?: {
+ /** @description Base models to include */
+ base_models?: components["schemas"]["BaseModelType"][] | null;
+ /** @description The type of model to get */
+ model_type?: components["schemas"]["ModelType"] | null;
+ /** @description Exact match on the name of the model */
+ model_name?: string | null;
+ /** @description Exact match on the format of the model (e.g. 'diffusers') */
+ model_format?: components["schemas"]["ModelFormat"] | null;
+ };
header?: never;
path?: never;
cookie?: never;
@@ -26400,53 +24429,8 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ModelInstallJob"][];
- };
- };
- };
- };
- install_model: {
- parameters: {
- query: {
- /** @description Model source to install, can be a local path, repo_id, or remote URL */
- source: string;
- /** @description Whether or not to install a local model in place */
- inplace?: boolean | null;
- /** @description access token for the remote resource */
- access_token?: string | null;
- };
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["ModelRecordChanges"];
- };
- };
- responses: {
- /** @description The model imported successfully */
- 201: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["ModelInstallJob"];
- };
- };
- /** @description There is already a model corresponding to this path or repo_id */
- 409: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Unrecognized file/folder format */
- 415: {
- headers: {
- [name: string]: unknown;
+ "application/json": components["schemas"]["ModelsList"];
};
- content?: never;
};
/** @description Validation Error */
422: {
@@ -26457,54 +24441,17 @@ export interface operations {
"application/json": components["schemas"]["HTTPValidationError"];
};
};
- /** @description The model appeared to import successfully, but could not be found in the model manager */
- 424: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- };
- };
- prune_model_install_jobs: {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Successful Response */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": unknown;
- };
- };
- /** @description All completed and errored jobs have been pruned */
- 204: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description Bad request */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
};
};
- install_hugging_face_model: {
+ get_model_records_by_attrs: {
parameters: {
query: {
- /** @description HuggingFace repo_id to install */
- source: string;
+ /** @description The name of the model */
+ name: string;
+ /** @description The type of the model */
+ type: components["schemas"]["ModelType"];
+ /** @description The base model of the model */
+ base: components["schemas"]["BaseModelType"];
};
header?: never;
path?: never;
@@ -26512,28 +24459,14 @@ export interface operations {
};
requestBody?: never;
responses: {
- /** @description The model is being installed */
- 201: {
+ /** @description Successful Response */
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "text/html": string;
- };
- };
- /** @description Bad request */
- 400: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description There is already a model corresponding to this path or repo_id */
- 409: {
- headers: {
- [name: string]: unknown;
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
};
- content?: never;
};
/** @description Validation Error */
422: {
@@ -26546,28 +24479,53 @@ export interface operations {
};
};
};
- get_model_install_job: {
+ get_model_record: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description Model install id */
- id: number;
+ path: {
+ /** @description Key of the model record to fetch. */
+ key: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Success */
+ /** @description The model configuration was retrieved successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ModelInstallJob"];
+ /** @example {
+ * "path": "string",
+ * "name": "string",
+ * "base": "sd-1",
+ * "type": "main",
+ * "format": "checkpoint",
+ * "config_path": "string",
+ * "key": "string",
+ * "hash": "string",
+ * "file_size": 1,
+ * "description": "string",
+ * "source": "string",
+ * "converted_at": 0,
+ * "variant": "normal",
+ * "prediction_type": "epsilon",
+ * "repo_variant": "fp16",
+ * "upcast_attention": false
+ * } */
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
};
};
- /** @description No such job */
+ /** @description Bad request */
+ 400: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description The model could not be found */
404: {
headers: {
[name: string]: unknown;
@@ -26585,29 +24543,27 @@ export interface operations {
};
};
};
- cancel_model_install_job: {
+ delete_model: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description Model install job ID */
- id: number;
+ /** @description Unique key of model to remove from model registry. */
+ key: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description The job was cancelled successfully */
- 201: {
+ /** @description Model deleted successfully */
+ 204: {
headers: {
[name: string]: unknown;
};
- content: {
- "application/json": unknown;
- };
+ content?: never;
};
- /** @description No such job */
- 415: {
+ /** @description Model not found */
+ 404: {
headers: {
[name: string]: unknown;
};
@@ -26624,19 +24580,23 @@ export interface operations {
};
};
};
- convert_model: {
+ update_model_record: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description Unique key of the safetensors main model to convert to diffusers format. */
+ /** @description Unique key of model */
key: string;
};
cookie?: never;
};
- requestBody?: never;
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ModelRecordChanges"];
+ };
+ };
responses: {
- /** @description Model converted successfully */
+ /** @description The model was updated successfully */
200: {
headers: {
[name: string]: unknown;
@@ -26660,7 +24620,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Bad request */
@@ -26670,14 +24630,14 @@ export interface operations {
};
content?: never;
};
- /** @description Model not found */
+ /** @description The model could not be found */
404: {
headers: {
[name: string]: unknown;
};
content?: never;
};
- /** @description There is already a model registered at this location */
+ /** @description There is already a model corresponding to the new name */
409: {
headers: {
[name: string]: unknown;
@@ -26695,56 +24655,97 @@ export interface operations {
};
};
};
- get_starter_models: {
+ scan_for_models: {
parameters: {
- query?: never;
+ query?: {
+ /** @description Directory path to search for models */
+ scan_path?: string;
+ };
header?: never;
path?: never;
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Directory scanned successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["StarterModelResponse"];
+ "application/json": components["schemas"]["FoundModel"][];
+ };
+ };
+ /** @description Invalid directory path */
+ 400: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
- get_stats: {
+ get_hugging_face_models: {
parameters: {
- query?: never;
+ query?: {
+ /** @description Hugging face repo to search for models */
+ hugging_face_repo?: string;
+ };
header?: never;
path?: never;
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Hugging Face repo scanned successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["CacheStats"] | null;
+ "application/json": components["schemas"]["HuggingFaceModels"];
+ };
+ };
+ /** @description Invalid hugging face repo */
+ 400: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
- empty_model_cache: {
+ get_model_image: {
parameters: {
query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description The name of model image file to get */
+ key: string;
+ };
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description The model image was fetched successfully */
200: {
headers: {
[name: string]: unknown;
@@ -26753,49 +24754,99 @@ export interface operations {
"application/json": unknown;
};
};
+ /** @description Bad request */
+ 400: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description The model image could not be found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
};
};
- get_hf_login_status: {
+ delete_model_image: {
parameters: {
query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description Unique key of model image to remove from model_images directory. */
+ key: string;
+ };
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
- 200: {
+ /** @description Model image deleted successfully */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Model image not found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HFTokenStatus"];
+ "application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
- do_hf_login: {
+ update_model_image: {
parameters: {
query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description Unique key of model */
+ key: string;
+ };
cookie?: never;
};
requestBody: {
content: {
- "application/json": components["schemas"]["Body_do_hf_login"];
+ "multipart/form-data": components["schemas"]["Body_update_model_image"];
};
};
responses: {
- /** @description Successful Response */
+ /** @description The model image was updated successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HFTokenStatus"];
+ "application/json": unknown;
+ };
+ };
+ /** @description Bad request */
+ 400: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
/** @description Validation Error */
422: {
@@ -26808,7 +24859,7 @@ export interface operations {
};
};
};
- reset_hf_token: {
+ list_model_installs: {
parameters: {
query?: never;
header?: never;
@@ -26823,32 +24874,73 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HFTokenStatus"];
+ "application/json": components["schemas"]["ModelInstallJob"][];
};
};
};
};
- list_downloads: {
+ install_model: {
parameters: {
- query?: never;
+ query: {
+ /** @description Model source to install, can be a local path, repo_id, or remote URL */
+ source: string;
+ /** @description Whether or not to install a local model in place */
+ inplace?: boolean | null;
+ /** @description access token for the remote resource */
+ access_token?: string | null;
+ };
header?: never;
path?: never;
cookie?: never;
};
- requestBody?: never;
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ModelRecordChanges"];
+ };
+ };
responses: {
- /** @description Successful Response */
- 200: {
+ /** @description The model imported successfully */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ModelInstallJob"];
+ };
+ };
+ /** @description There is already a model corresponding to this path or repo_id */
+ 409: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Unrecognized file/folder format */
+ 415: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ /** @description The model appeared to import successfully, but could not be found in the model manager */
+ 424: {
headers: {
[name: string]: unknown;
};
- content: {
- "application/json": components["schemas"]["DownloadJob"][];
- };
+ content?: never;
};
};
};
- prune_downloads: {
+ prune_model_install_jobs: {
parameters: {
query?: never;
header?: never;
@@ -26866,7 +24958,7 @@ export interface operations {
"application/json": unknown;
};
};
- /** @description All completed jobs have been pruned */
+ /** @description All completed and errored jobs have been pruned */
204: {
headers: {
[name: string]: unknown;
@@ -26882,27 +24974,40 @@ export interface operations {
};
};
};
- download: {
+ install_hugging_face_model: {
parameters: {
- query?: never;
+ query: {
+ /** @description HuggingFace repo_id to install */
+ source: string;
+ };
header?: never;
path?: never;
cookie?: never;
};
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_download"];
- };
- };
+ requestBody?: never;
responses: {
- /** @description Successful Response */
- 200: {
+ /** @description The model is being installed */
+ 201: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DownloadJob"];
+ "text/html": string;
+ };
+ };
+ /** @description Bad request */
+ 400: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description There is already a model corresponding to this path or repo_id */
+ 409: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
/** @description Validation Error */
422: {
@@ -26915,12 +25020,12 @@ export interface operations {
};
};
};
- get_download_job: {
+ get_model_install_job: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description ID of the download job to fetch. */
+ /** @description Model install id */
id: number;
};
cookie?: never;
@@ -26933,10 +25038,10 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DownloadJob"];
+ "application/json": components["schemas"]["ModelInstallJob"];
};
};
- /** @description The requested download JobID could not be found */
+ /** @description No such job */
404: {
headers: {
[name: string]: unknown;
@@ -26954,20 +25059,20 @@ export interface operations {
};
};
};
- cancel_download_job: {
+ cancel_model_install_job: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description ID of the download job to cancel. */
+ /** @description Model install job ID */
id: number;
};
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
- 200: {
+ /** @description The job was cancelled successfully */
+ 201: {
headers: {
[name: string]: unknown;
};
@@ -26975,15 +25080,8 @@ export interface operations {
"application/json": unknown;
};
};
- /** @description Job has been cancelled */
- 204: {
- headers: {
- [name: string]: unknown;
- };
- content?: never;
- };
- /** @description The requested download JobID could not be found */
- 404: {
+ /** @description No such job */
+ 415: {
headers: {
[name: string]: unknown;
};
@@ -27000,106 +25098,120 @@ export interface operations {
};
};
};
- cancel_all_download_jobs: {
+ convert_model: {
parameters: {
query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description Unique key of the safetensors main model to convert to diffusers format. */
+ key: string;
+ };
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Model converted successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": unknown;
+ /** @example {
+ * "path": "string",
+ * "name": "string",
+ * "base": "sd-1",
+ * "type": "main",
+ * "format": "checkpoint",
+ * "config_path": "string",
+ * "key": "string",
+ * "hash": "string",
+ * "file_size": 1,
+ * "description": "string",
+ * "source": "string",
+ * "converted_at": 0,
+ * "variant": "normal",
+ * "prediction_type": "epsilon",
+ * "repo_variant": "fp16",
+ * "upcast_attention": false
+ * } */
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"];
};
};
- /** @description Download jobs have been cancelled */
- 204: {
+ /** @description Bad request */
+ 400: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Model not found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description There is already a model registered at this location */
+ 409: {
headers: {
[name: string]: unknown;
};
content?: never;
};
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
};
};
- upload_image: {
+ get_starter_models: {
parameters: {
- query: {
- /** @description The category of the image */
- image_category: components["schemas"]["ImageCategory"];
- /** @description Whether this is an intermediate image */
- is_intermediate: boolean;
- /** @description The board to add this image to, if any */
- board_id?: string | null;
- /** @description The session ID associated with this upload, if any */
- session_id?: string | null;
- /** @description Whether to crop the image */
- crop_visible?: boolean | null;
- };
+ query?: never;
header?: never;
path?: never;
cookie?: never;
};
- requestBody: {
- content: {
- "multipart/form-data": components["schemas"]["Body_upload_image"];
- };
- };
+ requestBody?: never;
responses: {
- /** @description The image was uploaded successfully */
- 201: {
+ /** @description Successful Response */
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageDTO"];
- };
- };
- /** @description Image upload failed */
- 415: {
- headers: {
- [name: string]: unknown;
+ "application/json": components["schemas"]["StarterModelResponse"];
};
- content?: never;
};
- /** @description Validation Error */
- 422: {
+ };
+ };
+ get_stats: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HTTPValidationError"];
+ "application/json": components["schemas"]["CacheStats"] | null;
};
};
};
};
- list_image_dtos: {
+ empty_model_cache: {
parameters: {
- query?: {
- /** @description The origin of images to list. */
- image_origin?: components["schemas"]["ResourceOrigin"] | null;
- /** @description The categories of image to include. */
- categories?: components["schemas"]["ImageCategory"][] | null;
- /** @description Whether to list intermediate images. */
- is_intermediate?: boolean | null;
- /** @description The board id to filter by. Use 'none' to find images without a board. */
- board_id?: string | null;
- /** @description The page offset */
- offset?: number;
- /** @description The number of images per page */
- limit?: number;
- /** @description The order of sort */
- order_dir?: components["schemas"]["SQLiteDirection"];
- /** @description Whether to sort by starred images first */
- starred_first?: boolean;
- /** @description The term to search for */
- search_term?: string | null;
- };
+ query?: never;
header?: never;
path?: never;
cookie?: never;
@@ -27112,21 +25224,32 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"];
+ "application/json": unknown;
};
};
- /** @description Validation Error */
- 422: {
+ };
+ };
+ get_hf_login_status: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HTTPValidationError"];
+ "application/json": components["schemas"]["HFTokenStatus"];
};
};
};
};
- create_image_upload_entry: {
+ do_hf_login: {
parameters: {
query?: never;
header?: never;
@@ -27135,7 +25258,7 @@ export interface operations {
};
requestBody: {
content: {
- "application/json": components["schemas"]["Body_create_image_upload_entry"];
+ "application/json": components["schemas"]["Body_do_hf_login"];
};
};
responses: {
@@ -27145,7 +25268,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageUploadEntry"];
+ "application/json": components["schemas"]["HFTokenStatus"];
};
};
/** @description Validation Error */
@@ -27159,14 +25282,11 @@ export interface operations {
};
};
};
- get_image_dto: {
+ reset_hf_token: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description The name of image to get */
- image_name: string;
- };
+ path?: never;
cookie?: never;
};
requestBody?: never;
@@ -27177,28 +25297,36 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageDTO"];
+ "application/json": components["schemas"]["HFTokenStatus"];
};
};
- /** @description Validation Error */
- 422: {
+ };
+ };
+ list_downloads: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HTTPValidationError"];
+ "application/json": components["schemas"]["DownloadJob"][];
};
};
};
};
- delete_image: {
+ prune_downloads: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description The name of the image to delete */
- image_name: string;
- };
+ path?: never;
cookie?: never;
};
requestBody?: never;
@@ -27209,33 +25337,35 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DeleteImagesResult"];
+ "application/json": unknown;
};
};
- /** @description Validation Error */
- 422: {
+ /** @description All completed jobs have been pruned */
+ 204: {
headers: {
[name: string]: unknown;
};
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
+ content?: never;
+ };
+ /** @description Bad request */
+ 400: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
};
};
- update_image: {
+ download: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description The name of the image to update */
- image_name: string;
- };
+ path?: never;
cookie?: never;
};
requestBody: {
content: {
- "application/json": components["schemas"]["ImageRecordChanges"];
+ "application/json": components["schemas"]["Body_download"];
};
};
responses: {
@@ -27245,7 +25375,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageDTO"];
+ "application/json": components["schemas"]["DownloadJob"];
};
};
/** @description Validation Error */
@@ -27259,53 +25389,52 @@ export interface operations {
};
};
};
- get_intermediates_count: {
+ get_download_job: {
parameters: {
query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description ID of the download job to fetch. */
+ id: number;
+ };
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Success */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": number;
+ "application/json": components["schemas"]["DownloadJob"];
};
};
- };
- };
- clear_intermediates: {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: never;
- responses: {
- /** @description Successful Response */
- 200: {
+ /** @description The requested download JobID could not be found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": number;
+ "application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
- get_image_metadata: {
+ cancel_download_job: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description The name of image to get */
- image_name: string;
+ /** @description ID of the download job to cancel. */
+ id: number;
};
cookie?: never;
};
@@ -27317,8 +25446,22 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["MetadataField"] | null;
+ "application/json": unknown;
+ };
+ };
+ /** @description Job has been cancelled */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description The requested download JobID could not be found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
/** @description Validation Error */
422: {
@@ -27331,14 +25474,11 @@ export interface operations {
};
};
};
- get_image_workflow: {
+ cancel_all_download_jobs: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description The name of image whose workflow to get */
- image_name: string;
- };
+ path?: never;
cookie?: never;
};
requestBody?: never;
@@ -27349,43 +25489,53 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["WorkflowAndGraphResponse"];
+ "application/json": unknown;
};
};
- /** @description Validation Error */
- 422: {
+ /** @description Download jobs have been cancelled */
+ 204: {
headers: {
[name: string]: unknown;
};
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
+ content?: never;
};
};
};
- get_image_full: {
+ upload_image: {
parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description The name of full-resolution image file to get */
- image_name: string;
+ query: {
+ /** @description The category of the image */
+ image_category: components["schemas"]["ImageCategory"];
+ /** @description Whether this is an intermediate image */
+ is_intermediate: boolean;
+ /** @description The board to add this image to, if any */
+ board_id?: string | null;
+ /** @description The session ID associated with this upload, if any */
+ session_id?: string | null;
+ /** @description Whether to crop the image */
+ crop_visible?: boolean | null;
};
+ header?: never;
+ path?: never;
cookie?: never;
};
- requestBody?: never;
+ requestBody: {
+ content: {
+ "multipart/form-data": components["schemas"]["Body_upload_image"];
+ };
+ };
responses: {
- /** @description Return the full-resolution image */
- 200: {
+ /** @description The image was uploaded successfully */
+ 201: {
headers: {
[name: string]: unknown;
};
content: {
- "image/png": unknown;
+ "application/json": components["schemas"]["ImageDTO"];
};
};
- /** @description Image not found */
- 404: {
+ /** @description Image upload failed */
+ 415: {
headers: {
[name: string]: unknown;
};
@@ -27402,33 +25552,75 @@ export interface operations {
};
};
};
- get_image_full_head: {
+ list_image_dtos: {
parameters: {
- query?: never;
- header?: never;
- path: {
- /** @description The name of full-resolution image file to get */
- image_name: string;
+ query?: {
+ /** @description The origin of images to list. */
+ image_origin?: components["schemas"]["ResourceOrigin"] | null;
+ /** @description The categories of image to include. */
+ categories?: components["schemas"]["ImageCategory"][] | null;
+ /** @description Whether to list intermediate images. */
+ is_intermediate?: boolean | null;
+ /** @description The board id to filter by. Use 'none' to find images without a board. */
+ board_id?: string | null;
+ /** @description The page offset */
+ offset?: number;
+ /** @description The number of images per page */
+ limit?: number;
+ /** @description The order of sort */
+ order_dir?: components["schemas"]["SQLiteDirection"];
+ /** @description Whether to sort by starred images first */
+ starred_first?: boolean;
+ /** @description The term to search for */
+ search_term?: string | null;
};
+ header?: never;
+ path?: never;
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Return the full-resolution image */
+ /** @description Successful Response */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "image/png": unknown;
+ "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"];
};
};
- /** @description Image not found */
- 404: {
+ /** @description Validation Error */
+ 422: {
headers: {
[name: string]: unknown;
};
- content?: never;
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_image_upload_entry: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_create_image_upload_entry"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ImageUploadEntry"];
+ };
};
/** @description Validation Error */
422: {
@@ -27441,33 +25633,26 @@ export interface operations {
};
};
};
- get_image_thumbnail: {
+ get_image_dto: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description The name of thumbnail image file to get */
+ /** @description The name of image to get */
image_name: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Return the image thumbnail */
+ /** @description Successful Response */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "image/webp": unknown;
- };
- };
- /** @description Image not found */
- 404: {
- headers: {
- [name: string]: unknown;
+ "application/json": components["schemas"]["ImageDTO"];
};
- content?: never;
};
/** @description Validation Error */
422: {
@@ -27480,12 +25665,12 @@ export interface operations {
};
};
};
- get_image_urls: {
+ delete_image: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description The name of the image whose URL to get */
+ /** @description The name of the image to delete */
image_name: string;
};
cookie?: never;
@@ -27498,7 +25683,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageUrlsDTO"];
+ "application/json": components["schemas"]["DeleteImagesResult"];
};
};
/** @description Validation Error */
@@ -27512,16 +25697,19 @@ export interface operations {
};
};
};
- delete_images_from_list: {
+ update_image: {
parameters: {
query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description The name of the image to update */
+ image_name: string;
+ };
cookie?: never;
};
requestBody: {
content: {
- "application/json": components["schemas"]["Body_delete_images_from_list"];
+ "application/json": components["schemas"]["ImageRecordChanges"];
};
};
responses: {
@@ -27531,7 +25719,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DeleteImagesResult"];
+ "application/json": components["schemas"]["ImageDTO"];
};
};
/** @description Validation Error */
@@ -27545,7 +25733,7 @@ export interface operations {
};
};
};
- delete_uncategorized_images: {
+ get_intermediates_count: {
parameters: {
query?: never;
header?: never;
@@ -27560,56 +25748,42 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DeleteImagesResult"];
+ "application/json": number;
};
};
};
};
- star_images_in_list: {
+ clear_intermediates: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_star_images_in_list"];
- };
- };
- responses: {
- /** @description Successful Response */
- 200: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["StarredImagesResult"];
- };
- };
- /** @description Validation Error */
- 422: {
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["HTTPValidationError"];
+ "application/json": number;
};
};
};
};
- unstar_images_in_list: {
+ get_image_metadata: {
parameters: {
query?: never;
header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_unstar_images_in_list"];
+ path: {
+ /** @description The name of image to get */
+ image_name: string;
};
+ cookie?: never;
};
+ requestBody?: never;
responses: {
/** @description Successful Response */
200: {
@@ -27617,7 +25791,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["UnstarredImagesResult"];
+ "application/json": components["schemas"]["MetadataField"] | null;
};
};
/** @description Validation Error */
@@ -27631,26 +25805,25 @@ export interface operations {
};
};
};
- download_images_from_list: {
+ get_image_workflow: {
parameters: {
query?: never;
header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody?: {
- content: {
- "application/json": components["schemas"]["Body_download_images_from_list"];
+ path: {
+ /** @description The name of image whose workflow to get */
+ image_name: string;
};
+ cookie?: never;
};
+ requestBody?: never;
responses: {
/** @description Successful Response */
- 202: {
+ 200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImagesDownloaded"];
+ "application/json": components["schemas"]["WorkflowAndGraphResponse"];
};
};
/** @description Validation Error */
@@ -27664,25 +25837,25 @@ export interface operations {
};
};
};
- get_bulk_download_item: {
+ get_image_full: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description The bulk_download_item_name of the bulk download item to get */
- bulk_download_item_name: string;
+ /** @description The name of full-resolution image file to get */
+ image_name: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Return the complete bulk download item */
+ /** @description Return the full-resolution image */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/zip": unknown;
+ "image/png": unknown;
};
};
/** @description Image not found */
@@ -27703,38 +25876,33 @@ export interface operations {
};
};
};
- get_image_names: {
+ get_image_full_head: {
parameters: {
- query?: {
- /** @description The origin of images to list. */
- image_origin?: components["schemas"]["ResourceOrigin"] | null;
- /** @description The categories of image to include. */
- categories?: components["schemas"]["ImageCategory"][] | null;
- /** @description Whether to list intermediate images. */
- is_intermediate?: boolean | null;
- /** @description The board id to filter by. Use 'none' to find images without a board. */
- board_id?: string | null;
- /** @description The order of sort */
- order_dir?: components["schemas"]["SQLiteDirection"];
- /** @description Whether to sort by starred images first */
- starred_first?: boolean;
- /** @description The term to search for */
- search_term?: string | null;
- };
+ query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description The name of full-resolution image file to get */
+ image_name: string;
+ };
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Return the full-resolution image */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageNamesResult"];
+ "image/png": unknown;
+ };
+ };
+ /** @description Image not found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
/** @description Validation Error */
422: {
@@ -27747,27 +25915,33 @@ export interface operations {
};
};
};
- get_images_by_names: {
+ get_image_thumbnail: {
parameters: {
query?: never;
header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_get_images_by_names"];
+ path: {
+ /** @description The name of thumbnail image file to get */
+ image_name: string;
};
+ cookie?: never;
};
+ requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Return the image thumbnail */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["ImageDTO"][];
+ "image/webp": unknown;
+ };
+ };
+ /** @description Image not found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
/** @description Validation Error */
422: {
@@ -27780,13 +25954,13 @@ export interface operations {
};
};
};
- get_video_dto: {
+ get_image_urls: {
parameters: {
query?: never;
header?: never;
path: {
- /** @description The id of the video to get */
- video_id: string;
+ /** @description The name of the image whose URL to get */
+ image_name: string;
};
cookie?: never;
};
@@ -27798,7 +25972,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["VideoDTO"];
+ "application/json": components["schemas"]["ImageUrlsDTO"];
};
};
/** @description Validation Error */
@@ -27812,19 +25986,16 @@ export interface operations {
};
};
};
- update_video: {
+ delete_images_from_list: {
parameters: {
query?: never;
header?: never;
- path: {
- /** @description The id of the video to update */
- video_id: string;
- };
+ path?: never;
cookie?: never;
};
requestBody: {
content: {
- "application/json": components["schemas"]["VideoRecordChanges"];
+ "application/json": components["schemas"]["Body_delete_images_from_list"];
};
};
responses: {
@@ -27834,7 +26005,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["VideoDTO"];
+ "application/json": components["schemas"]["DeleteImagesResult"];
};
};
/** @description Validation Error */
@@ -27848,18 +26019,14 @@ export interface operations {
};
};
};
- delete_videos_from_list: {
+ delete_uncategorized_images: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_delete_videos_from_list"];
- };
- };
+ requestBody?: never;
responses: {
/** @description Successful Response */
200: {
@@ -27867,21 +26034,12 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DeleteVideosResult"];
- };
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
+ "application/json": components["schemas"]["DeleteImagesResult"];
};
};
};
};
- star_videos_in_list: {
+ star_images_in_list: {
parameters: {
query?: never;
header?: never;
@@ -27890,7 +26048,7 @@ export interface operations {
};
requestBody: {
content: {
- "application/json": components["schemas"]["Body_star_videos_in_list"];
+ "application/json": components["schemas"]["Body_star_images_in_list"];
};
};
responses: {
@@ -27900,7 +26058,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["StarredVideosResult"];
+ "application/json": components["schemas"]["StarredImagesResult"];
};
};
/** @description Validation Error */
@@ -27914,7 +26072,7 @@ export interface operations {
};
};
};
- unstar_videos_in_list: {
+ unstar_images_in_list: {
parameters: {
query?: never;
header?: never;
@@ -27923,7 +26081,7 @@ export interface operations {
};
requestBody: {
content: {
- "application/json": components["schemas"]["Body_unstar_videos_in_list"];
+ "application/json": components["schemas"]["Body_unstar_images_in_list"];
};
};
responses: {
@@ -27933,7 +26091,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["UnstarredVideosResult"];
+ "application/json": components["schemas"]["UnstarredImagesResult"];
};
};
/** @description Validation Error */
@@ -27947,58 +26105,66 @@ export interface operations {
};
};
};
- delete_uncategorized_videos: {
+ download_images_from_list: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
- requestBody?: never;
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_download_images_from_list"];
+ };
+ };
responses: {
/** @description Successful Response */
- 200: {
+ 202: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ImagesDownloaded"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["DeleteVideosResult"];
+ "application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
- list_video_dtos: {
+ get_bulk_download_item: {
parameters: {
- query?: {
- /** @description Whether to list intermediate videos. */
- is_intermediate?: boolean | null;
- /** @description The board id to filter by. Use 'none' to find videos without a board. */
- board_id?: string | null;
- /** @description The page offset */
- offset?: number;
- /** @description The number of videos per page */
- limit?: number;
- /** @description The order of sort */
- order_dir?: components["schemas"]["SQLiteDirection"];
- /** @description Whether to sort by starred videos first */
- starred_first?: boolean;
- /** @description The term to search for */
- search_term?: string | null;
- };
+ query?: never;
header?: never;
- path?: never;
+ path: {
+ /** @description The bulk_download_item_name of the bulk download item to get */
+ bulk_download_item_name: string;
+ };
cookie?: never;
};
requestBody?: never;
responses: {
- /** @description Successful Response */
+ /** @description Return the complete bulk download item */
200: {
headers: {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["OffsetPaginatedResults_VideoDTO_"];
+ "application/zip": unknown;
+ };
+ };
+ /** @description Image not found */
+ 404: {
+ headers: {
+ [name: string]: unknown;
};
+ content?: never;
};
/** @description Validation Error */
422: {
@@ -28011,16 +26177,20 @@ export interface operations {
};
};
};
- get_video_ids: {
+ get_image_names: {
parameters: {
query?: {
- /** @description Whether to list intermediate videos. */
+ /** @description The origin of images to list. */
+ image_origin?: components["schemas"]["ResourceOrigin"] | null;
+ /** @description The categories of image to include. */
+ categories?: components["schemas"]["ImageCategory"][] | null;
+ /** @description Whether to list intermediate images. */
is_intermediate?: boolean | null;
- /** @description The board id to filter by. Use 'none' to find videos without a board. */
+ /** @description The board id to filter by. Use 'none' to find images without a board. */
board_id?: string | null;
/** @description The order of sort */
order_dir?: components["schemas"]["SQLiteDirection"];
- /** @description Whether to sort by starred videos first */
+ /** @description Whether to sort by starred images first */
starred_first?: boolean;
/** @description The term to search for */
search_term?: string | null;
@@ -28037,7 +26207,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["VideoIdsResult"];
+ "application/json": components["schemas"]["ImageNamesResult"];
};
};
/** @description Validation Error */
@@ -28051,7 +26221,7 @@ export interface operations {
};
};
};
- get_videos_by_ids: {
+ get_images_by_names: {
parameters: {
query?: never;
header?: never;
@@ -28060,7 +26230,7 @@ export interface operations {
};
requestBody: {
content: {
- "application/json": components["schemas"]["Body_get_videos_by_ids"];
+ "application/json": components["schemas"]["Body_get_images_by_names"];
};
};
responses: {
@@ -28070,7 +26240,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["VideoDTO"][];
+ "application/json": components["schemas"]["ImageDTO"][];
};
};
/** @description Validation Error */
@@ -28131,8 +26301,6 @@ export interface operations {
query: {
/** @description The name of the board to create */
board_name: string;
- /** @description Whether the board is private */
- is_private?: boolean;
};
header?: never;
path?: never;
@@ -28432,72 +26600,6 @@ export interface operations {
};
};
};
- add_videos_to_board: {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_add_videos_to_board"];
- };
- };
- responses: {
- /** @description Videos were added to board successfully */
- 201: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["AddVideosToBoardResult"];
- };
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
- remove_videos_from_board: {
- parameters: {
- query?: never;
- header?: never;
- path?: never;
- cookie?: never;
- };
- requestBody: {
- content: {
- "application/json": components["schemas"]["Body_remove_videos_from_board"];
- };
- };
- responses: {
- /** @description Videos were removed from board successfully */
- 201: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["RemoveVideosFromBoardResult"];
- };
- };
- /** @description Validation Error */
- 422: {
- headers: {
- [name: string]: unknown;
- };
- content: {
- "application/json": components["schemas"]["HTTPValidationError"];
- };
- };
- };
- };
get_related_models: {
parameters: {
query?: never;
@@ -28729,7 +26831,7 @@ export interface operations {
};
};
};
- get_config: {
+ get_patchmatch_status: {
parameters: {
query?: never;
header?: never;
@@ -28744,7 +26846,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["AppConfig"];
+ "application/json": boolean;
};
};
};
@@ -29769,8 +27871,6 @@ export interface operations {
query?: string | null;
/** @description Whether to include/exclude recent workflows */
has_been_opened?: boolean | null;
- /** @description Whether to include/exclude published workflows */
- is_published?: boolean | null;
};
header?: never;
path?: never;
diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts
index 8b0cafe46b3..fa4c04a62eb 100644
--- a/invokeai/frontend/web/src/services/api/types.ts
+++ b/invokeai/frontend/web/src/services/api/types.ts
@@ -14,9 +14,6 @@ export type GetImageNamesResult =
paths['/api/v1/images/names']['get']['responses']['200']['content']['application/json'];
export type GetImageNamesArgs = NonNullable;
-export type GetVideoIdsResult = paths['/api/v1/videos/ids']['get']['responses']['200']['content']['application/json'];
-export type GetVideoIdsArgs = NonNullable;
-
export type ListBoardsArgs = NonNullable;
export type CreateBoardArg = paths['/api/v1/boards/']['post']['parameters']['query'];
@@ -46,7 +43,6 @@ export type InvocationJSONSchemaExtra = S['UIConfigBase'];
// App Info
export type AppVersion = S['AppVersion'];
-export type AppConfig = S['AppConfig'];
const zResourceOrigin = z.enum(['internal', 'external']);
type ResourceOrigin = z.infer;
@@ -76,35 +72,10 @@ const _zImageDTO = z.object({
});
export type ImageDTO = z.infer;
assert>();
-export const isImageDTO = (dto: ImageDTO | VideoDTO): dto is ImageDTO => {
- return 'image_name' in dto;
-};
export type BoardDTO = S['BoardDTO'];
export type OffsetPaginatedResults_ImageDTO_ = S['OffsetPaginatedResults_ImageDTO_'];
-// Videos
-const _zVideoDTO = z.object({
- video_id: z.string(),
- video_url: z.string(),
- thumbnail_url: z.string(),
- width: z.number().int().gt(0),
- height: z.number().int().gt(0),
- created_at: z.string(),
- updated_at: z.string(),
- deleted_at: z.string().nullish(),
- starred: z.boolean(),
- board_id: z.string().nullish(),
- is_intermediate: z.boolean(),
- session_id: z.string().nullish(),
- node_id: z.string().nullish(),
-});
-export type VideoDTO = z.infer;
-assert>();
-export const isVideoDTO = (dto: ImageDTO | VideoDTO): dto is VideoDTO => {
- return 'video_id' in dto;
-};
-
// Model Configs
export type AnyModelConfig = S['AnyModelConfig'];
export type MainModelConfig = Extract;
@@ -130,7 +101,6 @@ type CLIPVisionDiffusersConfig = Extract;
export type FLUXReduxModelConfig = Extract;
type ApiModelConfig = Extract;
-export type VideoApiModelConfig = Extract;
type UnknownModelConfig = Extract;
export type FLUXKontextModelConfig = MainModelConfig;
export type ChatGPT4oModelConfig = ApiModelConfig;
@@ -298,30 +268,14 @@ export const isFluxReduxModelConfig = (config: AnyModelConfig): config is FLUXRe
return config.type === 'flux_redux';
};
-export const isChatGPT4oModelConfig = (config: AnyModelConfig): config is ChatGPT4oModelConfig => {
- return config.type === 'main' && config.base === 'chatgpt-4o';
-};
-
-export const isVideoModelConfig = (config: AnyModelConfig): config is VideoApiModelConfig => {
- return config.type === 'video';
-};
-
export const isUnknownModelConfig = (config: AnyModelConfig): config is UnknownModelConfig => {
return config.type === 'unknown';
};
-export const isFluxKontextApiModelConfig = (config: AnyModelConfig): config is ApiModelConfig => {
- return config.type === 'main' && config.base === 'flux-kontext';
-};
-
export const isFluxKontextModelConfig = (config: AnyModelConfig): config is FLUXKontextModelConfig => {
return config.type === 'main' && config.base === 'flux' && config.name.toLowerCase().includes('kontext');
};
-export const isGemini2_5ModelConfig = (config: AnyModelConfig): config is ApiModelConfig => {
- return config.type === 'main' && config.base === 'gemini-2.5';
-};
-
export const isNonRefinerMainModelConfig = (config: AnyModelConfig): config is MainModelConfig => {
return config.type === 'main' && config.base !== 'sdxl-refiner';
};
diff --git a/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts b/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts
index ca79a99ca49..772dc077fb5 100644
--- a/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts
+++ b/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts
@@ -1,5 +1,5 @@
import type { OrderDir } from 'features/gallery/store/types';
-import type { GetImageNamesResult, GetVideoIdsResult, ImageDTO, VideoDTO } from 'services/api/types';
+import type { GetImageNamesResult, ImageDTO } from 'services/api/types';
/**
* Calculates the optimal insertion position for a new image in the names list.
@@ -57,60 +57,3 @@ export function insertImageIntoNamesResult(
total_count: currentResult.total_count + 1,
};
}
-
-/**
- * Calculates the optimal insertion position for a new image in the names list.
- * For starred_first=true: starred images go to position 0, unstarred go after all starred images
- * For starred_first=false: all new images go to position 0 (newest first)
- */
-function calculateVideoInsertionPosition(
- videoDTO: VideoDTO,
- starredFirst: boolean,
- starredCount: number,
- orderDir: OrderDir = 'DESC'
-): number {
- if (!starredFirst) {
- // When starred_first is false, insertion depends on order direction
- return orderDir === 'DESC' ? 0 : Number.MAX_SAFE_INTEGER;
- }
-
- // When starred_first is true
- if (videoDTO.starred) {
- // Starred images: beginning for desc, after existing starred for asc
- return orderDir === 'DESC' ? 0 : starredCount;
- }
-
- // Unstarred images go after all starred images
- return orderDir === 'DESC' ? starredCount : Number.MAX_SAFE_INTEGER;
-}
-
-/**
- * Optimistically inserts a new image into the ImageNamesResult at the correct position
- */
-export function insertVideoIntoGetVideoIdsResult(
- currentResult: GetVideoIdsResult,
- videoDTO: VideoDTO,
- starredFirst: boolean,
- orderDir: OrderDir = 'DESC'
-): GetVideoIdsResult {
- // Don't insert if the image is already in the list
- if (currentResult.video_ids.includes(videoDTO.video_id)) {
- return currentResult;
- }
-
- const insertPosition = calculateVideoInsertionPosition(videoDTO, starredFirst, currentResult.starred_count, orderDir);
-
- const newVideoIds = [...currentResult.video_ids];
- // Handle MAX_SAFE_INTEGER by pushing to end
- if (insertPosition >= newVideoIds.length) {
- newVideoIds.push(videoDTO.video_id);
- } else {
- newVideoIds.splice(insertPosition, 0, videoDTO.video_id);
- }
-
- return {
- video_ids: newVideoIds,
- starred_count: starredFirst && videoDTO.starred ? currentResult.starred_count + 1 : currentResult.starred_count,
- total_count: currentResult.total_count + 1,
- };
-}
diff --git a/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts b/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts
index b7a0c73df1a..477a5a03f87 100644
--- a/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts
+++ b/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts
@@ -4,7 +4,7 @@ import { getListImagesUrl } from 'services/api/util';
import type { ApiTagDescription } from '..';
export const getTagsToInvalidateForBoardAffectingMutation = (affected_boards: string[]): ApiTagDescription[] => {
- const tags: ApiTagDescription[] = ['ImageNameList', 'VideoIdList'];
+ const tags: ApiTagDescription[] = ['ImageNameList'];
for (const board_id of affected_boards) {
tags.push({
@@ -57,20 +57,3 @@ export const getTagsToInvalidateForImageMutation = (image_names: string[]): ApiT
return tags;
};
-
-export const getTagsToInvalidateForVideoMutation = (video_ids: string[]): ApiTagDescription[] => {
- const tags: ApiTagDescription[] = [];
-
- for (const video_id of video_ids) {
- tags.push({
- type: 'Video',
- id: video_id,
- });
- // tags.push({
- // type: 'VideoMetadata',
- // id: video_id,
- // });
- }
-
- return tags;
-};
diff --git a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx
index 57c8ab7b7b1..e2ee74dcad1 100644
--- a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx
+++ b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx
@@ -5,21 +5,19 @@ import {
selectAutoSwitch,
selectGalleryView,
selectGetImageNamesQueryArgs,
- selectGetVideoIdsQueryArgs,
selectListBoardsQueryArgs,
selectSelectedBoardId,
} from 'features/gallery/store/gallerySelectors';
-import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice';
+import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
-import { isImageField, isImageFieldCollection, isVideoField } from 'features/nodes/types/common';
+import { isImageField, isImageFieldCollection } from 'features/nodes/types/common';
import { zNodeStatus } from 'features/nodes/types/invocation';
import type { LRUCache } from 'lru-cache';
import { boardsApi } from 'services/api/endpoints/boards';
import { getImageDTOSafe, imagesApi } from 'services/api/endpoints/images';
-import { getVideoDTOSafe, videosApi } from 'services/api/endpoints/videos';
-import type { ImageDTO, S, VideoDTO } from 'services/api/types';
+import type { ImageDTO, S } from 'services/api/types';
import { getCategories } from 'services/api/util';
-import { insertImageIntoNamesResult, insertVideoIntoGetVideoIdsResult } from 'services/api/util/optimisticUpdates';
+import { insertImageIntoNamesResult } from 'services/api/util/optimisticUpdates';
import { $lastProgressEvent } from 'services/events/stores';
import stableHash from 'stable-hash';
import type { Param0 } from 'tsafe';
@@ -28,8 +26,18 @@ import type { JsonObject } from 'type-fest';
const log = logger('events');
+// These nodes are passthrough nodes. They do not add images to the gallery, so we must skip that handling for them.
const nodeTypeDenylist = ['load_image', 'image'];
+/**
+ * Builds the socket event handler for invocation complete events. Adds output images to the gallery and/or updates
+ * node execution states for the workflow editor.
+ *
+ * @param getState The Redux getState function.
+ * @param dispatch The Redux dispatch function.
+ * @param finishedQueueItemIds A cache of finished queue item IDs to prevent duplicate handling and avoid race
+ * conditions that can happen when a graph finishes very quickly.
+ */
export const buildOnInvocationComplete = (
getState: AppGetState,
dispatch: AppDispatch,
@@ -172,7 +180,7 @@ export const buildOnInvocationComplete = (
boardIdSelected({
boardId: board_id,
select: {
- selection: [{ type: 'image', id: image_name }],
+ selection: [image_name],
galleryView: 'images',
},
})
@@ -184,161 +192,10 @@ export const buildOnInvocationComplete = (
dispatch(galleryViewChanged('images'));
}
// Select the image immediately since we've optimistically updated the cache
- dispatch(itemSelected({ type: 'image', id: lastImageDTO.image_name }));
+ dispatch(imageSelected(lastImageDTO.image_name));
}
};
- const addVideosToGallery = async (data: S['InvocationCompleteEvent']) => {
- if (nodeTypeDenylist.includes(data.invocation.type)) {
- log.trace(`Skipping denylisted node type (${data.invocation.type})`);
- return;
- }
-
- const videoDTOs = await getResultVideoDTOs(data);
- if (videoDTOs.length === 0) {
- return;
- }
-
- // For efficiency's sake, we want to minimize the number of dispatches and invalidations we do.
- // We'll keep track of each change we need to make and do them all at once.
- const boardTotalAdditions: Record = {};
- const getVideoIdsArg = selectGetVideoIdsQueryArgs(getState());
-
- for (const videoDTO of videoDTOs) {
- if (videoDTO.is_intermediate) {
- return;
- }
-
- const board_id = videoDTO.board_id ?? 'none';
-
- boardTotalAdditions[board_id] = (boardTotalAdditions[board_id] || 0) + 1;
- }
-
- // Update all the board image totals at once
- const entries: Param0 = [];
- for (const [boardId, amountToAdd] of objectEntries(boardTotalAdditions)) {
- // upsertQueryEntries doesn't provide a "recipe" function for the update - we must provide the new value
- // directly. So we need to select the board totals first.
- const total = boardsApi.endpoints.getBoardImagesTotal.select(boardId)(getState()).data?.total;
- if (total === undefined) {
- // No cache exists for this board, so we can't update it.
- continue;
- }
- entries.push({
- endpointName: 'getBoardImagesTotal',
- arg: boardId,
- value: { total: total + amountToAdd },
- });
- }
- dispatch(boardsApi.util.upsertQueryEntries(entries));
-
- dispatch(
- boardsApi.util.updateQueryData('listAllBoards', selectListBoardsQueryArgs(getState()), (draft) => {
- for (const board of draft) {
- board.image_count = board.image_count + (boardTotalAdditions[board.board_id] ?? 0);
- }
- })
- );
-
- /**
- * Optimistic update and cache invalidation for image names queries that match this image's board and categories.
- * - Optimistic update for the cache that does not have a search term (we cannot derive the correct insertion
- * position when a search term is present).
- * - Cache invalidation for the query that has a search term, so it will be refetched.
- *
- * Note: The image DTO objects are already implicitly cached by the getResultImageDTOs function. We do not need
- * to explicitly cache them again here.
- */
- for (const videoDTO of videoDTOs) {
- // Override board_id and categories for this specific image to build the "expected" args for the query.
- const videoSpecificArgs = {
- board_id: videoDTO.board_id ?? 'none',
- };
-
- const expectedQueryArgs = {
- ...getVideoIdsArg,
- ...videoSpecificArgs,
- search_term: '',
- };
-
- // If the cache for the query args provided here does not exist, RTK Query will ignore the update.
- dispatch(
- videosApi.util.updateQueryData(
- 'getVideoIds',
- {
- ...getVideoIdsArg,
- ...videoSpecificArgs,
- search_term: '',
- },
- (draft) => {
- const updatedResult = insertVideoIntoGetVideoIdsResult(
- draft,
- videoDTO,
- expectedQueryArgs.starred_first ?? true,
- expectedQueryArgs.order_dir
- );
-
- draft.video_ids = updatedResult.video_ids;
- draft.starred_count = updatedResult.starred_count;
- draft.total_count = updatedResult.total_count;
- }
- )
- );
-
- // If there is a search term present, we need to invalidate that query to ensure the search results are updated.
- if (getVideoIdsArg.search_term) {
- const expectedQueryArgs = {
- ...getVideoIdsArg,
- ...videoSpecificArgs,
- };
- dispatch(videosApi.util.invalidateTags([{ type: 'VideoList', id: stableHash(expectedQueryArgs) }]));
- }
- }
-
- // No need to invalidate tags since we're doing optimistic updates
- // Board totals are already updated above via upsertQueryEntries
-
- const autoSwitch = selectAutoSwitch(getState());
-
- if (!autoSwitch) {
- return;
- }
-
- // Finally, we may need to autoswitch to the new video. We'll only do it for the last video in the list.
- const lastVideoDTO = videoDTOs.at(-1);
-
- if (!lastVideoDTO) {
- return;
- }
-
- const { video_id } = lastVideoDTO;
- const board_id = lastVideoDTO.board_id ?? 'none';
-
- // With optimistic updates, we can immediately switch to the new image
- const selectedBoardId = selectSelectedBoardId(getState());
-
- // If the video is from a different board, switch to that board & select the video - otherwise just select the
- // video. This implicitly changes the view to 'videos' if it was not already.
- if (board_id !== selectedBoardId) {
- dispatch(
- boardIdSelected({
- boardId: board_id,
- select: {
- selection: [{ type: 'video', id: video_id }],
- galleryView: 'videos',
- },
- })
- );
- } else {
- // Ensure we are on the 'videos' gallery view - that's where this video will be displayed
- const galleryView = selectGalleryView(getState());
- if (galleryView !== 'videos') {
- dispatch(galleryViewChanged('videos'));
- }
- // Select the video immediately since we've optimistically updated the cache
- dispatch(itemSelected({ type: 'video', id: lastVideoDTO.video_id }));
- }
- };
const getResultImageDTOs = async (data: S['InvocationCompleteEvent']): Promise => {
const { result } = data;
const imageDTOs: ImageDTO[] = [];
@@ -360,22 +217,6 @@ export const buildOnInvocationComplete = (
return imageDTOs;
};
- const getResultVideoDTOs = async (data: S['InvocationCompleteEvent']): Promise => {
- const { result } = data;
- const videoDTOs: VideoDTO[] = [];
-
- for (const [_name, value] of objectEntries(result)) {
- if (isVideoField(value)) {
- const videoDTO = await getVideoDTOSafe(value.video_id);
- if (videoDTO) {
- videoDTOs.push(videoDTO);
- }
- }
- }
-
- return videoDTOs;
- };
-
return async (data: S['InvocationCompleteEvent']) => {
if (finishedQueueItemIds.has(data.item_id)) {
log.trace({ data } as JsonObject, `Received event for already-finished queue item ${data.item_id}`);
@@ -396,7 +237,6 @@ export const buildOnInvocationComplete = (
}
await addImagesToGallery(data);
- await addVideosToGallery(data);
$lastProgressEvent.set(null);
};
diff --git a/invokeai/frontend/web/src/services/events/onModelInstallError.tsx b/invokeai/frontend/web/src/services/events/onModelInstallError.tsx
index 4b57381f196..24c31c4a296 100644
--- a/invokeai/frontend/web/src/services/events/onModelInstallError.tsx
+++ b/invokeai/frontend/web/src/services/events/onModelInstallError.tsx
@@ -1,9 +1,7 @@
import { Button, ExternalLink, Spinner, Text } from '@invoke-ai/ui-library';
-import { skipToken } from '@reduxjs/toolkit/query';
import { logger } from 'app/logging/logger';
import type { AppDispatch, AppGetState } from 'app/store/store';
import { getPrefixedId } from 'features/controlLayers/konva/util';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { discordLink, githubIssuesLink } from 'features/system/store/constants';
import { toast, toastApi } from 'features/toast/toast';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -41,6 +39,9 @@ const getHFTokenStatus = async (dispatch: AppDispatch): Promise {
return async (data: S['ModelInstallErrorEvent']) => {
log.error({ data }, 'Model install error');
@@ -145,8 +146,7 @@ export const buildOnModelInstallError = (getState: AppGetState, dispatch: AppDis
};
const HFUnauthorizedToastDescription = () => {
- const isEnabled = useFeatureStatus('hfToken');
- const { data } = useGetHFTokenStatusQuery(isEnabled ? undefined : skipToken);
+ const { data } = useGetHFTokenStatusQuery();
const { t } = useTranslation();
diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx
index 5804399f1f7..f998627d26c 100644
--- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx
+++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx
@@ -1,19 +1,9 @@
import { ExternalLink, Flex, Text } from '@invoke-ai/ui-library';
-import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import { socketConnected } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
-import { $baseUrl } from 'app/store/nanostores/baseUrl';
-import { $bulkDownloadId } from 'app/store/nanostores/bulkDownloadId';
-import { $queueId } from 'app/store/nanostores/queueId';
import type { AppStore } from 'app/store/store';
-import { listenerMiddleware } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import { forEach, isNil, round } from 'es-toolkit/compat';
-import {
- $isInPublishFlow,
- $outputNodeId,
- $validationRunData,
-} from 'features/nodes/components/sidePanel/workflow/publish';
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { zNodeStatus } from 'features/nodes/types/invocation';
import ErrorToastDescription, { getTitle } from 'features/toast/ErrorToastDescription';
@@ -25,7 +15,6 @@ import type { ApiTagDescription } from 'services/api';
import { api, LIST_ALL_TAG, LIST_TAG } from 'services/api';
import { modelsApi } from 'services/api/endpoints/models';
import { queueApi } from 'services/api/endpoints/queue';
-import { workflowsApi } from 'services/api/endpoints/workflows';
import { buildOnInvocationComplete } from 'services/events/onInvocationComplete';
import { buildOnModelInstallError, DiscordLink, GitHubIssuesLink } from 'services/events/onModelInstallError';
import type { ClientToServerEvents, ServerToClientEvents } from 'services/events/types';
@@ -44,6 +33,10 @@ type SetEventListenersArg = {
const selectModelInstalls = modelsApi.endpoints.listModelInstalls.select();
+/**
+ * Sets up event listeners for the socketio client. Some components will set up their own listeners. These are the ones
+ * that have app-wide implications.
+ */
export const setEventListeners = ({ socket, store, setIsConnected }: SetEventListenersArg) => {
const { dispatch, getState } = store;
@@ -55,12 +48,8 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
log.debug('Connected');
setIsConnected(true);
dispatch(socketConnected());
- const queue_id = $queueId.get();
- socket.emit('subscribe_queue', { queue_id });
- if (!$baseUrl.get()) {
- const bulk_download_id = $bulkDownloadId.get();
- socket.emit('subscribe_bulk_download', { bulk_download_id });
- }
+ socket.emit('subscribe_queue', { queue_id: 'default' });
+ socket.emit('subscribe_bulk_download', { bulk_download_id: 'default' });
$lastProgressEvent.set(null);
});
@@ -371,7 +360,6 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
// we've got new status for the queue item, batch and queue
const {
item_id,
- session_id,
status,
batch_status,
error_type,
@@ -381,7 +369,6 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
updated_at,
completed_at,
error_traceback,
- credits,
} = data;
log.debug({ data }, `Queue item ${item_id} status updated: ${status}`);
@@ -396,7 +383,6 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
draft.error_type = error_type;
draft.error_message = error_message;
draft.error_traceback = error_traceback;
- draft.credits = credits;
})
);
@@ -441,67 +427,17 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
} else if (status === 'completed' || status === 'failed' || status === 'canceled') {
finishedQueueItemIds.set(item_id, true);
if (status === 'failed' && error_type) {
- const isLocal = getState().config.isLocal ?? true;
- const sessionId = session_id;
-
toast({
id: `INVOCATION_ERROR_${error_type}`,
title: getTitle(error_type),
status: 'error',
duration: null,
- updateDescription: isLocal,
- description: (
-
- ),
+ updateDescription: true,
+ description: ,
});
}
// If the queue item is completed, failed, or cancelled, we want to clear the last progress event
$lastProgressEvent.set(null);
- // $progressImages.setKey(session_id, undefined);
-
- // When a validation run is completed, we want to clear the validation run batch ID & set the workflow as published
- const validationRunData = $validationRunData.get();
- if (!validationRunData || batch_status.batch_id !== validationRunData.batchId || status !== 'completed') {
- return;
- }
-
- // The published status of a workflow is server state, provided to the client in by the getWorkflow query.
- // After successfully publishing a workflow, we need to invalidate the query cache so that the published status is
- // seen throughout the app. We also need to reset the publish flow state.
- //
- // But, there is a race condition! If we invalidate the query cache and then immediately clear the publish flow state,
- // between the time when the publish state is cleared and the query is re-fetched, we will render the wrong UI.
- //
- // So, we really need to wait for the query re-fetch to complete before clearing the publish flow state. This isn't
- // possible using the `invalidateTags()` API. But we can fudge it by adding a once-off listener for that query.
-
- listenerMiddleware.startListening({
- matcher: isAnyOf(
- workflowsApi.endpoints.getWorkflow.matchFulfilled,
- workflowsApi.endpoints.getWorkflow.matchRejected
- ),
- effect: (action, listenerApi) => {
- if (workflowsApi.endpoints.getWorkflow.matchFulfilled(action)) {
- // If this query was re-fetching the workflow that was just published, we can clear the publish flow state and
- // unsubscribe from the listener
- if (action.payload.workflow_id === validationRunData.workflowId) {
- listenerApi.unsubscribe();
- $validationRunData.set(null);
- $isInPublishFlow.set(false);
- $outputNodeId.set(null);
- }
- } else if (workflowsApi.endpoints.getWorkflow.matchRejected(action)) {
- // If the query failed, we can unsubscribe from the listener
- listenerApi.unsubscribe();
- }
- },
- });
- dispatch(workflowsApi.util.invalidateTags([{ type: 'Workflow', id: validationRunData.workflowId }]));
}
});
diff --git a/invokeai/frontend/web/src/services/events/stores.ts b/invokeai/frontend/web/src/services/events/stores.ts
index 6d9b89c88a9..720ba920cf2 100644
--- a/invokeai/frontend/web/src/services/events/stores.ts
+++ b/invokeai/frontend/web/src/services/events/stores.ts
@@ -1,11 +1,9 @@
import { round } from 'es-toolkit/compat';
-import { atom, computed, map } from 'nanostores';
+import { atom, computed } from 'nanostores';
import type { S } from 'services/api/types';
import type { AppSocket } from 'services/events/types';
-import type { ManagerOptions, SocketOptions } from 'socket.io-client';
export const $socket = atom(null);
-export const $socketOptions = map>({});
export const $isConnected = atom(false);
export const $lastProgressEvent = atom(null);
diff --git a/invokeai/frontend/web/src/services/events/useSocketIO.ts b/invokeai/frontend/web/src/services/events/useSocketIO.ts
index 255bcee2fd4..cdbfb882247 100644
--- a/invokeai/frontend/web/src/services/events/useSocketIO.ts
+++ b/invokeai/frontend/web/src/services/events/useSocketIO.ts
@@ -1,7 +1,3 @@
-import { useStore } from '@nanostores/react';
-import { $authToken } from 'app/store/nanostores/authToken';
-import { $baseUrl } from 'app/store/nanostores/baseUrl';
-import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import type { MapStore } from 'nanostores';
@@ -12,7 +8,7 @@ import type { AppSocket } from 'services/events/types';
import type { ManagerOptions, SocketOptions } from 'socket.io-client';
import { io } from 'socket.io-client';
-import { $isConnected, $lastProgressEvent, $socket, $socketOptions } from './stores';
+import { $isConnected, $lastProgressEvent, $socket } from './stores';
// Inject socket options and url into window for debugging
declare global {
@@ -27,34 +23,22 @@ declare global {
export const useSocketIO = () => {
useAssertSingleton('useSocketIO');
const store = useAppStore();
- const baseUrl = useStore($baseUrl);
- const authToken = useStore($authToken);
- const addlSocketOptions = useStore($socketOptions);
const socketUrl = useMemo(() => {
const wsProtocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
- if (baseUrl) {
- return baseUrl.replace(/^https?:\/\//i, '');
- }
-
return `${wsProtocol}://${window.location.host}`;
- }, [baseUrl]);
+ }, []);
const socketOptions = useMemo(() => {
const options: Partial = {
timeout: 60000,
- path: baseUrl ? '/ws/socket.io' : `${window.location.pathname}ws/socket.io`,
+ path: `${window.location.pathname}ws/socket.io`,
autoConnect: false, // achtung! removing this breaks the dynamic middleware
forceNew: true,
};
- if (authToken) {
- options.auth = { token: authToken };
- options.transports = ['websocket', 'polling'];
- }
-
- return { ...options, ...addlSocketOptions };
- }, [authToken, addlSocketOptions, baseUrl]);
+ return options;
+ }, []);
useEffect(() => {
const socket: AppSocket = io(socketUrl, socketOptions);
@@ -64,8 +48,7 @@ export const useSocketIO = () => {
socket.connect();
- if ($isDebugging.get() || import.meta.env.MODE === 'development') {
- window.$socketOptions = $socketOptions;
+ if (import.meta.env.MODE === 'development') {
// This is only enabled manually for debugging, console is allowed.
/* eslint-disable-next-line no-console */
console.log('Socket initialized', socket);
@@ -79,7 +62,7 @@ export const useSocketIO = () => {
});
return () => {
- if ($isDebugging.get() || import.meta.env.MODE === 'development') {
+ if (import.meta.env.MODE === 'development') {
window.$socketOptions = undefined;
// This is only enabled manually for debugging, console is allowed.
/* eslint-disable-next-line no-console */
diff --git a/invokeai/frontend/web/vite.config.mts b/invokeai/frontend/web/vite.config.mts
index a697148322d..d15c35d6bce 100644
--- a/invokeai/frontend/web/vite.config.mts
+++ b/invokeai/frontend/web/vite.config.mts
@@ -1,71 +1,12 @@
///
import react from '@vitejs/plugin-react-swc';
-import path from 'path';
import { visualizer } from 'rollup-plugin-visualizer';
-import type { PluginOption } from 'vite';
import { defineConfig } from 'vite';
-import cssInjectedByJsPlugin from 'vite-plugin-css-injected-by-js';
-import dts from 'vite-plugin-dts';
import eslint from 'vite-plugin-eslint';
import tsconfigPaths from 'vite-tsconfig-paths';
import { loggerContextPlugin } from './vite-plugin-logger-context';
export default defineConfig(({ mode }) => {
- if (mode === 'package') {
- return {
- base: './',
- plugins: [
- react(),
- eslint(),
- tsconfigPaths(),
- loggerContextPlugin(),
- visualizer(),
- dts({
- insertTypesEntry: true,
- }),
- cssInjectedByJsPlugin(),
- ],
- build: {
- /**
- * zone.js (via faro) requires max ES2015 to prevent spamming unhandled promise rejections.
- *
- * See:
- * - https://github.com/grafana/faro-web-sdk/issues/566
- * - https://github.com/angular/angular/issues/51328
- * - https://github.com/open-telemetry/opentelemetry-js/issues/3030
- */
- target: 'ES2015',
- cssCodeSplit: true,
- lib: {
- entry: path.resolve(__dirname, './src/index.ts'),
- name: 'InvokeAIUI',
- fileName: (format) => `invoke-ai-ui.${format}.js`,
- },
- rollupOptions: {
- external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react', '@invoke-ai/ui-library'],
- output: {
- globals: {
- react: 'React',
- 'react-dom': 'ReactDOM',
- '@emotion/react': 'EmotionReact',
- '@invoke-ai/ui-library': 'UiLibrary',
- },
- },
- },
- },
- resolve: {
- alias: {
- app: path.resolve(__dirname, './src/app'),
- assets: path.resolve(__dirname, './src/assets'),
- common: path.resolve(__dirname, './src/common'),
- features: path.resolve(__dirname, './src/features'),
- services: path.resolve(__dirname, './src/services'),
- theme: path.resolve(__dirname, './src/theme'),
- },
- },
- };
- }
-
return {
base: './',
plugins: [
diff --git a/invokeai/invocation_api/__init__.py b/invokeai/invocation_api/__init__.py
index 6094b28c5dc..9069f6d4a7a 100644
--- a/invokeai/invocation_api/__init__.py
+++ b/invokeai/invocation_api/__init__.py
@@ -29,7 +29,6 @@
OutputField,
UIComponent,
UIType,
- VideoField,
WithBoard,
WithMetadata,
WithWorkflow,
@@ -68,7 +67,6 @@
LatentsOutput,
StringCollectionOutput,
StringOutput,
- VideoOutput,
)
from invokeai.app.invocations.scheduler import SchedulerOutput
from invokeai.app.services.boards.boards_common import BoardDTO
@@ -115,7 +113,6 @@
"OutputField",
"UIComponent",
"UIType",
- "VideoField",
"WithBoard",
"WithMetadata",
"WithWorkflow",
@@ -157,7 +154,6 @@
"LatentsOutput",
"StringCollectionOutput",
"StringOutput",
- "VideoOutput",
# invokeai.app.services.image_records.image_records_common
"ImageCategory",
# invokeai.app.services.boards.boards_common
diff --git a/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json b/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json
index 6c49fc63bcb..b05524b241f 100644
--- a/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json
+++ b/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910
-size 2776833
+oid sha256:01154a4426e6077c8a3f04fca42edb5293bac73a7faed666901f25591ef89182
+size 3383407